Diffstat (limited to 'drivers/scsi')
250 files changed, 13490 insertions, 6982 deletions
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 2b1e0d503020..fb6444d0409c 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1049,9 +1049,7 @@ static int tw_chrdev_open(struct inode *inode, struct file *file)
 static const struct file_operations tw_fops = {
 	.owner		= THIS_MODULE,
 	.unlocked_ioctl	= tw_chrdev_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = tw_chrdev_ioctl,
-#endif
+	.compat_ioctl	= compat_ptr_ioctl,
 	.open		= tw_chrdev_open,
 	.release	= NULL,
 	.llseek		= noop_llseek,
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index c25e8a54e869..3170b295a5da 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -134,7 +134,7 @@ static char *blogic_cmd_failure_reason;
 static void blogic_announce_drvr(struct blogic_adapter *adapter)
 {
 	blogic_announce("***** BusLogic SCSI Driver Version " blogic_drvr_version " of " blogic_drvr_date " *****\n", adapter);
-	blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff " "<lnz@dandelion.com>\n", adapter);
+	blogic_announce("Copyright 1995-1998 by Leonard N. Zubkoff <lnz@dandelion.com>\n", adapter);
 }
@@ -440,7 +440,7 @@ static int blogic_cmd(struct blogic_adapter *adapter, enum blogic_opcode opcode,
 			goto done;
 		}
 		if (blogic_global_options.trace_config)
-			blogic_notice("blogic_cmd(%02X) Status = %02X: " "(Modify I/O Address)\n", adapter, opcode, statusreg.all);
+			blogic_notice("blogic_cmd(%02X) Status = %02X: (Modify I/O Address)\n", adapter, opcode, statusreg.all);
 		result = 0;
 		goto done;
 	}
@@ -716,23 +716,23 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 		pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
 
 		if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
-			blogic_err("BusLogic: Base Address0 0x%X not I/O for " "MultiMaster Host Adapter\n", NULL, base_addr0);
-			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+			blogic_err("BusLogic: Base Address0 0x%lX not I/O for MultiMaster Host Adapter\n", NULL, base_addr0);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
 			continue;
 		}
 		if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
-			blogic_err("BusLogic: Base Address1 0x%X not Memory for " "MultiMaster Host Adapter\n", NULL, base_addr1);
-			blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+			blogic_err("BusLogic: Base Address1 0x%lX not Memory for MultiMaster Host Adapter\n", NULL, base_addr1);
+			blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr);
 			continue;
 		}
 		if (irq_ch == 0) {
-			blogic_err("BusLogic: IRQ Channel %d invalid for " "MultiMaster Host Adapter\n", NULL, irq_ch);
-			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+			blogic_err("BusLogic: IRQ Channel %d invalid for MultiMaster Host Adapter\n", NULL, irq_ch);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
 			continue;
 		}
 		if (blogic_global_options.trace_probe) {
-			blogic_notice("BusLogic: PCI MultiMaster Host Adapter " "detected at\n", NULL);
-			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+			blogic_notice("BusLogic: PCI MultiMaster Host Adapter detected at\n", NULL);
+			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr);
 		}
 		/*
 		   Issue the Inquire PCI Host Adapter Information command to determine
@@ -818,7 +818,7 @@ static int __init blogic_init_mm_probeinfo(struct blogic_adapter *adapter)
 			nonpr_mmcount++;
 			mmcount++;
 		} else
-			blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+			blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL);
 	}
 	/*
 	   If the AutoSCSI "Use Bus And Device # For PCI Scanning Seq."
@@ -956,23 +956,23 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
 		pci_addr = base_addr1 = pci_resource_start(pci_device, 1);
 #ifdef CONFIG_SCSI_FLASHPOINT
 		if (pci_resource_flags(pci_device, 0) & IORESOURCE_MEM) {
-			blogic_err("BusLogic: Base Address0 0x%X not I/O for " "FlashPoint Host Adapter\n", NULL, base_addr0);
-			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+			blogic_err("BusLogic: Base Address0 0x%lX not I/O for FlashPoint Host Adapter\n", NULL, base_addr0);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
 			continue;
 		}
 		if (pci_resource_flags(pci_device, 1) & IORESOURCE_IO) {
-			blogic_err("BusLogic: Base Address1 0x%X not Memory for " "FlashPoint Host Adapter\n", NULL, base_addr1);
-			blogic_err("at PCI Bus %d Device %d PCI Address 0x%X\n", NULL, bus, device, pci_addr);
+			blogic_err("BusLogic: Base Address1 0x%lX not Memory for FlashPoint Host Adapter\n", NULL, base_addr1);
+			blogic_err("at PCI Bus %d Device %d PCI Address 0x%lX\n", NULL, bus, device, pci_addr);
 			continue;
 		}
 		if (irq_ch == 0) {
-			blogic_err("BusLogic: IRQ Channel %d invalid for " "FlashPoint Host Adapter\n", NULL, irq_ch);
-			blogic_err("at PCI Bus %d Device %d I/O Address 0x%X\n", NULL, bus, device, io_addr);
+			blogic_err("BusLogic: IRQ Channel %d invalid for FlashPoint Host Adapter\n", NULL, irq_ch);
+			blogic_err("at PCI Bus %d Device %d I/O Address 0x%lX\n", NULL, bus, device, io_addr);
 			continue;
 		}
 		if (blogic_global_options.trace_probe) {
-			blogic_notice("BusLogic: FlashPoint Host Adapter " "detected at\n", NULL);
-			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address " "0x%X PCI Address 0x%X\n", NULL, bus, device, io_addr, pci_addr);
+			blogic_notice("BusLogic: FlashPoint Host Adapter detected at\n", NULL);
+			blogic_notice("BusLogic: PCI Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX\n", NULL, bus, device, io_addr, pci_addr);
 		}
 		if (blogic_probeinfo_count < BLOGIC_MAX_ADAPTERS) {
 			struct blogic_probeinfo *probeinfo =
@@ -987,11 +987,11 @@ static int __init blogic_init_fp_probeinfo(struct blogic_adapter *adapter)
 			probeinfo->pci_device = pci_dev_get(pci_device);
 			fpcount++;
 		} else
-			blogic_warn("BusLogic: Too many Host Adapters " "detected\n", NULL);
+			blogic_warn("BusLogic: Too many Host Adapters detected\n", NULL);
 #else
-		blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", NULL, bus, device);
-		blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, irq %d, " "but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
-		blogic_err("BusLogic: support was omitted in this kernel " "configuration.\n", NULL);
+		blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", NULL, bus, device);
+		blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, irq %d, but FlashPoint\n", NULL, io_addr, pci_addr, irq_ch);
+		blogic_err("BusLogic: support was omitted in this kernel configuration.\n", NULL);
 #endif
 	}
 	/*
@@ -1099,9 +1099,9 @@ static bool blogic_failure(struct blogic_adapter *adapter, char *msg)
 	if (adapter->adapter_bus_type == BLOGIC_PCI_BUS) {
 		blogic_err("While configuring BusLogic PCI Host Adapter at\n", adapter);
-		blogic_err("Bus %d Device %d I/O Address 0x%X PCI Address 0x%X:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
+		blogic_err("Bus %d Device %d I/O Address 0x%lX PCI Address 0x%lX:\n", adapter, adapter->bus, adapter->dev, adapter->io_addr, adapter->pci_addr);
 	} else
-		blogic_err("While configuring BusLogic Host Adapter at " "I/O Address 0x%X:\n", adapter, adapter->io_addr);
+		blogic_err("While configuring BusLogic Host Adapter at I/O Address 0x%lX:\n", adapter, adapter->io_addr);
 	blogic_err("%s FAILED - DETACHING\n", adapter, msg);
 	if (blogic_cmd_failure_reason != NULL)
 		blogic_err("ADDITIONAL FAILURE INFO - %s\n", adapter,
@@ -1129,13 +1129,13 @@ static bool __init blogic_probe(struct blogic_adapter *adapter)
 		fpinfo->present = false;
 		if (!(FlashPoint_ProbeHostAdapter(fpinfo) == 0 && fpinfo->present)) {
-			blogic_err("BusLogic: FlashPoint Host Adapter detected at " "PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
-			blogic_err("BusLogic: I/O Address 0x%X PCI Address 0x%X, " "but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
+			blogic_err("BusLogic: FlashPoint Host Adapter detected at PCI Bus %d Device %d\n", adapter, adapter->bus, adapter->dev);
+			blogic_err("BusLogic: I/O Address 0x%lX PCI Address 0x%lX, but FlashPoint\n", adapter, adapter->io_addr, adapter->pci_addr);
 			blogic_err("BusLogic: Probe Function failed to validate it.\n", adapter);
 			return false;
 		}
 		if (blogic_global_options.trace_probe)
-			blogic_notice("BusLogic_Probe(0x%X): FlashPoint Found\n", adapter, adapter->io_addr);
+			blogic_notice("BusLogic_Probe(0x%lX): FlashPoint Found\n", adapter, adapter->io_addr);
 		/*
 		   Indicate the Host Adapter Probe completed successfully.
 		 */
@@ -1152,7 +1152,7 @@ static bool __init blogic_probe(struct blogic_adapter *adapter)
 	intreg.all = blogic_rdint(adapter);
 	georeg.all = blogic_rdgeom(adapter);
 	if (blogic_global_options.trace_probe)
-		blogic_notice("BusLogic_Probe(0x%X): Status 0x%02X, Interrupt 0x%02X, " "Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
+		blogic_notice("BusLogic_Probe(0x%lX): Status 0x%02X, Interrupt 0x%02X, Geometry 0x%02X\n", adapter, adapter->io_addr, statusreg.all, intreg.all, georeg.all);
 	if (statusreg.all == 0 || statusreg.sr.diag_active || statusreg.sr.cmd_param_busy || statusreg.sr.rsvd || statusreg.sr.cmd_invalid || intreg.ir.rsvd != 0)
@@ -1231,7 +1231,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
 		udelay(100);
 	}
 	if (blogic_global_options.trace_hw_reset)
-		blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Active, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+		blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Active, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
 	if (timeout < 0)
 		return false;
 	/*
@@ -1251,7 +1251,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
 		udelay(100);
 	}
 	if (blogic_global_options.trace_hw_reset)
-		blogic_notice("BusLogic_HardwareReset(0x%X): Diagnostic Completed, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+		blogic_notice("BusLogic_HardwareReset(0x%lX): Diagnostic Completed, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
 	if (timeout < 0)
 		return false;
 	/*
@@ -1267,7 +1267,7 @@ static bool blogic_hwreset(struct blogic_adapter *adapter, bool hard_reset)
 		udelay(100);
 	}
 	if (blogic_global_options.trace_hw_reset)
-		blogic_notice("BusLogic_HardwareReset(0x%X): Host Adapter Ready, " "Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
+		blogic_notice("BusLogic_HardwareReset(0x%lX): Host Adapter Ready, Status 0x%02X\n", adapter, adapter->io_addr, statusreg.all);
 	if (timeout < 0)
 		return false;
 	/*
@@ -1323,7 +1323,7 @@ static bool __init blogic_checkadapter(struct blogic_adapter *adapter)
 	   Provide tracing information if requested and return.
 	 */
 	if (blogic_global_options.trace_probe)
-		blogic_notice("BusLogic_Check(0x%X): MultiMaster %s\n", adapter,
+		blogic_notice("BusLogic_Check(0x%lX): MultiMaster %s\n", adapter,
 				adapter->io_addr, (result ? "Found" : "Not Found"));
 	return result;
@@ -1836,7 +1836,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 	int tgt_id;
 
 	blogic_info("Configuring BusLogic Model %s %s%s%s%s SCSI Host Adapter\n", adapter, adapter->model, blogic_adapter_busnames[adapter->adapter_bus_type], (adapter->wide ? " Wide" : ""), (adapter->differential ? " Differential" : ""), (adapter->ultra ? " Ultra" : ""));
-	blogic_info("  Firmware Version: %s, I/O Address: 0x%X, " "IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
+	blogic_info("  Firmware Version: %s, I/O Address: 0x%lX, IRQ Channel: %d/%s\n", adapter, adapter->fw_ver, adapter->io_addr, adapter->irq_ch, (adapter->level_int ? "Level" : "Edge"));
 	if (adapter->adapter_bus_type != BLOGIC_PCI_BUS) {
 		blogic_info("  DMA Channel: ", adapter);
 		if (adapter->dma_ch > 0)
@@ -1844,7 +1844,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 		else
 			blogic_info("None, ", adapter);
 		if (adapter->bios_addr > 0)
-			blogic_info("BIOS Address: 0x%X, ", adapter,
+			blogic_info("BIOS Address: 0x%lX, ", adapter,
 					adapter->bios_addr);
 		else
 			blogic_info("BIOS Address: None, ", adapter);
@@ -1852,7 +1852,7 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 		blogic_info("  PCI Bus: %d, Device: %d, Address: ", adapter, adapter->bus, adapter->dev);
 		if (adapter->pci_addr > 0)
-			blogic_info("0x%X, ", adapter, adapter->pci_addr);
+			blogic_info("0x%lX, ", adapter, adapter->pci_addr);
 		else
 			blogic_info("Unassigned, ", adapter);
 	}
@@ -1932,10 +1932,10 @@ static bool __init blogic_reportconfig(struct blogic_adapter *adapter)
 	blogic_info("  Disconnect/Reconnect: %s, Tagged Queuing: %s\n", adapter, discon_msg, tagq_msg);
 	if (blogic_multimaster_type(adapter)) {
-		blogic_info("  Scatter/Gather Limit: %d of %d segments, " "Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
-		blogic_info("  Driver Queue Depth: %d, " "Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
+		blogic_info("  Scatter/Gather Limit: %d of %d segments, Mailboxes: %d\n", adapter, adapter->drvr_sglimit, adapter->adapter_sglimit, adapter->mbox_count);
+		blogic_info("  Driver Queue Depth: %d, Host Adapter Queue Depth: %d\n", adapter, adapter->drvr_qdepth, adapter->adapter_qdepth);
 	} else
-		blogic_info("  Driver Queue Depth: %d, " "Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
+		blogic_info("  Driver Queue Depth: %d, Scatter/Gather Limit: %d segments\n", adapter, adapter->drvr_qdepth, adapter->drvr_sglimit);
 	blogic_info("  Tagged Queue Depth: ", adapter);
 	common_tagq_depth = true;
 	for (tgt_id = 1; tgt_id < adapter->maxdev; tgt_id++)
@@ -2717,7 +2717,7 @@ static void blogic_scan_inbox(struct blogic_adapter *adapter)
 				   then there is most likely a bug in
 				   the Host Adapter firmware.
 				 */
-				blogic_warn("Illegal CCB #%ld status %d in " "Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
+				blogic_warn("Illegal CCB #%ld status %d in Incoming Mailbox\n", adapter, ccb->serial, ccb->status);
 			}
 		}
 		next_inbox->comp_code = BLOGIC_INBOX_FREE;
@@ -2752,7 +2752,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
 			if (ccb->opcode == BLOGIC_BDR) {
 				int tgt_id = ccb->tgt_id;
 
-				blogic_warn("Bus Device Reset CCB #%ld to Target " "%d Completed\n", adapter, ccb->serial, tgt_id);
+				blogic_warn("Bus Device Reset CCB #%ld to Target %d Completed\n", adapter, ccb->serial, tgt_id);
 				blogic_inc_count(&adapter->tgt_stats[tgt_id].bdr_done);
 				adapter->tgt_flags[tgt_id].tagq_active = false;
 				adapter->cmds_since_rst[tgt_id] = 0;
@@ -2829,7 +2829,7 @@ static void blogic_process_ccbs(struct blogic_adapter *adapter)
 				if (blogic_global_options.trace_err) {
 					int i;
 					blogic_notice("CCB #%ld Target %d: Result %X Host "
-							"Adapter Status %02X " "Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
+							"Adapter Status %02X Target Status %02X\n", adapter, ccb->serial, ccb->tgt_id, command->result, ccb->adapter_status, ccb->tgt_status);
 					blogic_notice("CDB   ", adapter);
 					for (i = 0; i < ccb->cdblen; i++)
 						blogic_notice(" %02X", adapter, ccb->cdb[i]);
@@ -3203,12 +3203,12 @@ static int blogic_qcmd_lck(struct scsi_cmnd *command,
 	 */
 	if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) {
 		spin_unlock_irq(adapter->scsi_host->host_lock);
-		blogic_warn("Unable to write Outgoing Mailbox - " "Pausing for 1 second\n", adapter);
+		blogic_warn("Unable to write Outgoing Mailbox - Pausing for 1 second\n", adapter);
 		blogic_delay(1);
 		spin_lock_irq(adapter->scsi_host->host_lock);
 		if (!blogic_write_outbox(adapter, BLOGIC_MBOX_START, ccb)) {
-			blogic_warn("Still unable to write Outgoing Mailbox - " "Host Adapter Dead?\n", adapter);
+			blogic_warn("Still unable to write Outgoing Mailbox - Host Adapter Dead?\n", adapter);
 			blogic_dealloc_ccb(ccb, 1);
 			command->result = DID_ERROR << 16;
 			command->scsi_done(command);
@@ -3443,8 +3443,8 @@ static int blogic_diskparam(struct scsi_device *sdev, struct block_device *dev,
 			if (diskparam->cylinders != saved_cyl)
 				blogic_warn("Adopting Geometry %d/%d from Partition Table\n", adapter, diskparam->heads, diskparam->sectors);
 		} else if (part_end_head > 0 || part_end_sector > 0) {
-			blogic_warn("Warning: Partition Table appears to " "have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
-			blogic_warn("not compatible with current BusLogic " "Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
+			blogic_warn("Warning: Partition Table appears to have Geometry %d/%d which is\n", adapter, part_end_head + 1, part_end_sector);
+			blogic_warn("not compatible with current BusLogic Host Adapter Geometry %d/%d\n", adapter, diskparam->heads, diskparam->sectors);
 		}
 	}
 	kfree(buf);
@@ -3689,7 +3689,7 @@ static int __init blogic_parseopts(char *options)
 				blogic_probe_options.probe134 = true;
 				break;
 			default:
-				blogic_err("BusLogic: Invalid Driver Options " "(invalid I/O Address 0x%X)\n", NULL, io_addr);
+				blogic_err("BusLogic: Invalid Driver Options (invalid I/O Address 0x%lX)\n", NULL, io_addr);
 				return 0;
 			}
 		} else if (blogic_parse(&options, "NoProbeISA"))
@@ -3710,7 +3710,7 @@ static int __init blogic_parseopts(char *options)
 			for (tgt_id = 0; tgt_id < BLOGIC_MAXDEV; tgt_id++) {
 				unsigned short qdepth = simple_strtoul(options, &options, 0);
 				if (qdepth > BLOGIC_MAX_TAG_DEPTH) {
-					blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+					blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth);
 					return 0;
 				}
 				drvr_opts->qdepth[tgt_id] = qdepth;
@@ -3719,12 +3719,12 @@ static int __init blogic_parseopts(char *options)
 				else if (*options == ']')
 					break;
 				else {
-					blogic_err("BusLogic: Invalid Driver Options " "(',' or ']' expected at '%s')\n", NULL, options);
+					blogic_err("BusLogic: Invalid Driver Options (',' or ']' expected at '%s')\n", NULL, options);
 					return 0;
 				}
 			}
 			if (*options != ']') {
-				blogic_err("BusLogic: Invalid Driver Options " "(']' expected at '%s')\n", NULL, options);
+				blogic_err("BusLogic: Invalid Driver Options (']' expected at '%s')\n", NULL, options);
 				return 0;
 			} else
 				options++;
@@ -3732,7 +3732,7 @@ static int __init blogic_parseopts(char *options)
 			unsigned short qdepth = simple_strtoul(options, &options, 0);
 			if (qdepth == 0 || qdepth > BLOGIC_MAX_TAG_DEPTH) {
-				blogic_err("BusLogic: Invalid Driver Options " "(invalid Queue Depth %d)\n", NULL, qdepth);
+				blogic_err("BusLogic: Invalid Driver Options (invalid Queue Depth %d)\n", NULL, qdepth);
 				return 0;
 			}
 			drvr_opts->common_qdepth = qdepth;
@@ -3778,7 +3778,7 @@ static int __init blogic_parseopts(char *options)
 			unsigned short bus_settle_time = simple_strtoul(options, &options, 0);
 			if (bus_settle_time > 5 * 60) {
-				blogic_err("BusLogic: Invalid Driver Options " "(invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
+				blogic_err("BusLogic: Invalid Driver Options (invalid Bus Settle Time %d)\n", NULL, bus_settle_time);
 				return 0;
 			}
 			drvr_opts->bus_settle_time = bus_settle_time;
@@ -3803,14 +3803,14 @@ static int __init blogic_parseopts(char *options)
 		if (*options == ',')
 			options++;
 		else if (*options != ';' && *options != '\0') {
-			blogic_err("BusLogic: Unexpected Driver Option '%s' " "ignored\n", NULL, options);
+			blogic_err("BusLogic: Unexpected Driver Option '%s' ignored\n", NULL, options);
 			*options = '\0';
 		}
 	}
 	if (!(blogic_drvr_options_count == 0 || blogic_probeinfo_count == 0 || blogic_drvr_options_count == blogic_probeinfo_count)) {
-		blogic_err("BusLogic: Invalid Driver Options " "(all or no I/O Addresses must be specified)\n", NULL);
+		blogic_err("BusLogic: Invalid Driver Options (all or no I/O Addresses must be specified)\n", NULL);
 		return 0;
 	}
 	/*
@@ -3864,7 +3864,7 @@ static int __init blogic_setup(char *str)
 	(void) get_options(str, ARRAY_SIZE(ints), ints);
 
 	if (ints[0] != 0) {
-		blogic_err("BusLogic: Obsolete Command Line Entry " "Format Ignored\n", NULL);
+		blogic_err("BusLogic: Obsolete Command Line Entry Format Ignored\n", NULL);
 		return 0;
 	}
 	if (str == NULL || *str == '\0')
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 1b92f3c19ff3..a7881f8eb05e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -68,6 +68,7 @@ comment "SCSI support type (disk, tape, CD-ROM)"
 config BLK_DEV_SD
 	tristate "SCSI disk support"
 	depends on SCSI
+	select BLK_DEV_INTEGRITY_T10 if BLK_DEV_INTEGRITY
 	---help---
 	  If you want to use SCSI hard disks, Fibre Channel disks,
 	  Serial ATA (SATA) or Parallel ATA (PATA) hard disks,
@@ -898,7 +899,7 @@ config SCSI_SNI_53C710
 
 config 53C700_LE_ON_BE
 	bool
-	depends on SCSI_LASI700
+	depends on SCSI_LASI700 || SCSI_SNI_53C710
 	default y
 
 config SCSI_STEX
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 536426f25e86..f2f7e6e76c07 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -129,6 +129,9 @@
 #define NCR5380_release_dma_irq(x)
 #endif
 
+static unsigned int disconnect_mask = ~0;
+module_param(disconnect_mask, int, 0444);
+
 static int do_abort(struct Scsi_Host *);
 static void do_reset(struct Scsi_Host *);
 static void bus_reset_cleanup(struct Scsi_Host *);
@@ -172,6 +175,19 @@ static inline void advance_sg_buffer(struct scsi_cmnd *cmd)
 	}
 }
 
+static inline void set_resid_from_SCp(struct scsi_cmnd *cmd)
+{
+	int resid = cmd->SCp.this_residual;
+	struct scatterlist *s = cmd->SCp.buffer;
+
+	if (s)
+		while (!sg_is_last(s)) {
+			s = sg_next(s);
+			resid += s->length;
+		}
+	scsi_set_resid(cmd, resid);
+}
+
 /**
  * NCR5380_poll_politely2 - wait for two chip register values
  * @hostdata: host private data
@@ -954,7 +970,8 @@ static bool NCR5380_select(struct Scsi_Host *instance, struct scsi_cmnd *cmd)
 	int err;
 	bool ret = true;
 	bool can_disconnect = instance->irq != NO_IRQ &&
-			      cmd->cmnd[0] != REQUEST_SENSE;
+			      cmd->cmnd[0] != REQUEST_SENSE &&
+			      (disconnect_mask & BIT(scmd_id(cmd)));
 
 	NCR5380_dprint(NDEBUG_ARBITRATION, instance);
 	dsprintk(NDEBUG_ARBITRATION, instance, "starting arbitration, id = %d\n",
@@ -1379,7 +1396,7 @@ static void do_reset(struct Scsi_Host *instance)
 * MESSAGE OUT phase and sending an ABORT message.
 * @instance: relevant scsi host instance
 *
- * Returns 0 on success, -1 on failure.
+ * Returns 0 on success, negative error code on failure.
 */
 
 static int do_abort(struct Scsi_Host *instance)
@@ -1404,7 +1421,7 @@ static int do_abort(struct Scsi_Host *instance)
 	rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, SR_REQ, 10 * HZ);
 	if (rc < 0)
-		goto timeout;
+		goto out;
 
 	tmp = NCR5380_read(STATUS_REG) & PHASE_MASK;
@@ -1415,7 +1432,7 @@ static int do_abort(struct Scsi_Host *instance)
 		              ICR_BASE | ICR_ASSERT_ATN | ICR_ASSERT_ACK);
 		rc = NCR5380_poll_politely(hostdata, STATUS_REG, SR_REQ, 0, 3 * HZ);
 		if (rc < 0)
-			goto timeout;
+			goto out;
 		NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN);
 	}
@@ -1424,17 +1441,17 @@ static int do_abort(struct Scsi_Host *instance)
 	len = 1;
 	phase = PHASE_MSGOUT;
 	NCR5380_transfer_pio(instance, &phase, &len, &msgptr);
+	if (len)
+		rc = -ENXIO;
 
 	/*
 	 * If we got here, and the command completed successfully,
 	 * we're about to go into bus free state.
 	 */
 
-	return len ? -1 : 0;
-
-timeout:
+out:
 	NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE);
-	return -1;
+	return rc;
 }
 
 /*
@@ -1803,6 +1820,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
 				cmd->result |= cmd->SCp.Status;
 				cmd->result |= cmd->SCp.Message << 8;
 
+				set_resid_from_SCp(cmd);
+
 				if (cmd->cmnd[0] == REQUEST_SENSE)
 					complete_cmd(instance, cmd);
 				else {
@@ -2264,7 +2283,7 @@ static int NCR5380_abort(struct scsi_cmnd *cmd)
 		dsprintk(NDEBUG_ABORT, instance, "abort: cmd %p is connected\n", cmd);
 		hostdata->connected = NULL;
 		hostdata->dma_len = 0;
-		if (do_abort(instance)) {
+		if (do_abort(instance) < 0) {
 			set_host_byte(cmd, DID_ERROR);
 			complete_cmd(instance, cmd);
 			result = FAILED;
diff --git a/drivers/scsi/a3000.c b/drivers/scsi/a3000.c
index 222c77c9621f..b6a0432f305a 100644
--- a/drivers/scsi/a3000.c
+++ b/drivers/scsi/a3000.c
@@ -39,7 +39,7 @@ static irqreturn_t a3000_intr(int irq, void *data)
 		spin_unlock_irqrestore(instance->host_lock, flags);
 		return IRQ_HANDLED;
 	}
-	pr_warning("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
+	pr_warn("Non-serviced A3000 SCSI-interrupt? ISTR = %02x\n", status);
 	return IRQ_NONE;
 }
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 0ed3f806ace5..33dbc051bff9 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -535,7 +535,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
 	if ((le32_to_cpu(get_name_reply->status) == CT_OK) && (get_name_reply->data[0] != '\0')) {
 		char *sp = get_name_reply->data;
-		int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+		int data_size = sizeof_field(struct aac_get_name_resp, data);
 
 		sp[data_size - 1] = '\0';
 		while (*sp == ' ')
@@ -574,7 +574,7 @@ static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
-	data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+	data_size = sizeof_field(struct aac_get_name_resp, data);
 
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
@@ -1477,6 +1477,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
 	struct aac_srb * srbcmd;
 	u32 flag;
 	u32 timeout;
+	struct aac_dev *dev = fib->dev;
 
 	aac_fib_init(fib);
 	switch(cmd->sc_data_direction){
@@ -1503,7 +1504,7 @@ static struct aac_srb * aac_scsi_common(struct fib * fib, struct scsi_cmnd * cmd)
 	srbcmd->flags = cpu_to_le32(flag);
 	timeout = cmd->request->timeout/HZ;
 	if (timeout == 0)
-		timeout = 1;
+		timeout = (dev->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT);
 	srbcmd->timeout = cpu_to_le32(timeout);  // timeout in seconds
 	srbcmd->retry_limit = 0; /* Obsolete parameter */
 	srbcmd->cdb_size = cpu_to_le32(cmd->cmd_len);
@@ -2467,13 +2468,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
 		set_sense(&dev->fsa_dev[cid].sense_data,
-			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+			  ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE));
 		scsicmd->scsi_done(scsicmd);
-		return 1;
+		return 0;
 	}
 
 	dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
@@ -2559,13 +2560,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
 		scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
 		set_sense(&dev->fsa_dev[cid].sense_data,
-			  HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
+			  ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
 			  ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
 		memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data, min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data), SCSI_SENSE_BUFFERSIZE));
 		scsicmd->scsi_done(scsicmd);
-		return 1;
+		return 0;
 	}
 
 	dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 3fa03230f6ba..e3e4ecbea726 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -85,7 +85,7 @@ enum {
 #define PMC_GLOBAL_INT_BIT0	0x00000001
 
 #ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 50877
+# define AAC_DRIVER_BUILD 50983
 # define AAC_DRIVER_BRANCH "-custom"
 #endif
 #define MAXIMUM_NUM_CONTAINERS	32
@@ -108,6 +108,8 @@ enum {
 #define AAC_BUS_TARGET_LOOP	(AAC_MAX_BUSES * AAC_MAX_TARGETS)
 #define AAC_MAX_NATIVE_SIZE	2048
 #define FW_ERROR_BUFFER_SIZE	512
+#define AAC_SA_TIMEOUT		180
+#define AAC_ARC_TIMEOUT		60
 
 #define get_bus_number(x)	(x/AAC_MAX_TARGETS)
 #define get_target_number(x)	(x%AAC_MAX_TARGETS)
@@ -1328,7 +1330,7 @@ struct fib {
 #define AAC_DEVTYPE_ARC_RAW		2
 #define AAC_DEVTYPE_NATIVE_RAW		3
 
-#define AAC_SAFW_RESCAN_DELAY		(10 * HZ)
+#define AAC_RESCAN_DELAY		(10 * HZ)
 
 struct aac_hba_map_info {
 	__le32	rmw_nexus;		/* nexus for native HBA devices */
@@ -1601,6 +1603,7 @@ struct aac_dev
 	struct fsa_dev_info	*fsa_dev;
 	struct task_struct	*thread;
 	struct delayed_work	safw_rescan_work;
+	struct delayed_work	src_reinit_aif_worker;
 	int	cardtype;
 	/*
 	 *This lock will protect the two 32-bit
@@ -1673,6 +1676,7 @@ struct aac_dev
 	u8			adapter_shutdown;
 	u32			handle_pci_error;
 	bool			init_reset;
+	u8			soft_reset_support;
 };
 
 #define aac_adapter_interrupt(dev) \
@@ -2644,7 +2648,12 @@ int aac_scan_host(struct aac_dev *dev);
 
 static inline void aac_schedule_safw_scan_worker(struct aac_dev *dev)
 {
-	schedule_delayed_work(&dev->safw_rescan_work, AAC_SAFW_RESCAN_DELAY);
+	schedule_delayed_work(&dev->safw_rescan_work, AAC_RESCAN_DELAY);
+}
+
+static inline void aac_schedule_src_reinit_aif_worker(struct aac_dev *dev)
+{
+	schedule_delayed_work(&dev->src_reinit_aif_worker, AAC_RESCAN_DELAY);
 }
 
 static inline void aac_safw_rescan_worker(struct work_struct *work)
@@ -2658,10 +2667,10 @@ static inline void aac_safw_rescan_worker(struct work_struct *work)
 	aac_scan_host(dev);
 }
 
-static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
+static inline void aac_cancel_rescan_worker(struct aac_dev *dev)
 {
-	if (dev->sa_firmware)
-		cancel_delayed_work_sync(&dev->safw_rescan_work);
+	cancel_delayed_work_sync(&dev->safw_rescan_work);
+	cancel_delayed_work_sync(&dev->src_reinit_aif_worker);
 }
 
 /* SCp.phase values */
@@ -2671,6 +2680,7 @@ static inline void aac_cancel_safw_rescan_worker(struct aac_dev *dev)
 #define AAC_OWNER_FIRMWARE	0x106
 
 void aac_safw_rescan_worker(struct work_struct *work);
+void aac_src_reinit_aif_worker(struct work_struct *work);
 int aac_acquire_irq(struct aac_dev *dev);
 void aac_free_irq(struct aac_dev *dev);
 int aac_setup_safw_adapter(struct aac_dev *dev);
@@ -2728,6 +2738,7 @@ int aac_probe_container(struct aac_dev *dev, int cid);
 int _aac_rx_init(struct aac_dev *dev);
 int aac_rx_select_comm(struct aac_dev *dev, int comm);
 int aac_rx_deliver_producer(struct fib * fib);
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index);
 
 static inline int aac_is_src(struct aac_dev *dev)
 {
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index d4fcfa1e54e0..f75878d773cf 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -571,6 +571,11 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
 	else
 		dev->sa_firmware = 0;
 
+	if (status[4] & le32_to_cpu(AAC_EXTOPT_SOFT_RESET))
+		dev->soft_reset_support = 1;
+	else
+		dev->soft_reset_support = 0;
+
 	if ((dev->comm_interface == AAC_COMM_MESSAGE) &&
 	    (status[2] > dev->base_size)) {
 		aac_adapter_ioremap(dev, 0);
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 2142a649e865..5a8a999606ea 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -232,6 +232,7 @@ struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
 	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
 	fibptr->callback_data = NULL;
 	fibptr->callback = NULL;
+	fibptr->flags = 0;
 
 	return fibptr;
 }
@@ -1463,6 +1464,14 @@ retry_next:
 	}
 }
 
+static void aac_schedule_bus_scan(struct aac_dev *aac)
+{
+	if (aac->sa_firmware)
+		aac_schedule_safw_scan_worker(aac);
+	else
+		aac_schedule_src_reinit_aif_worker(aac);
+}
+
 static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 {
 	int index, quirks;
@@ -1638,7 +1647,7 @@ out:
 	 */
 	if (!retval && !is_kdump_kernel()) {
 		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
-		aac_schedule_safw_scan_worker(aac);
+		aac_schedule_bus_scan(aac);
 	}
 
 	if (jafo) {
@@ -1959,6 +1968,16 @@ int aac_scan_host(struct aac_dev *dev)
 	return rcode;
 }
 
+void aac_src_reinit_aif_worker(struct work_struct *work)
+{
+	struct aac_dev *dev = container_of(to_delayed_work(work),
+				struct aac_dev, src_reinit_aif_worker);
+
+	wait_event(dev->scsi_host_ptr->host_wait,
+		   !scsi_host_in_recovery(dev->scsi_host_ptr));
+	aac_reinit_aif(dev, dev->cardtype);
+}
+
 /**
  *	aac_handle_sa_aif	Handle a message from the firmware
  *	@dev: Which adapter this fib is from
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 644f7f5c61a2..ee6bc2f9b80a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -27,7 +27,6 @@
 #include <linux/moduleparam.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
-#include <linux/pci-aspm.h>
 #include <linux/slab.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
@@ -392,6 +391,7 @@ static int aac_slave_configure(struct scsi_device *sdev)
 	int chn, tid;
 	unsigned int depth = 0;
 	unsigned int set_timeout = 0;
+	int timeout = 0;
 	bool set_qd_dev_type = false;
 	u8 devtype = 0;
@@ -484,10 +484,13 @@ common_config:
 
 	/*
 	 * Firmware has an individual device recovery time typically
-	 * of 35 seconds, give us a margin.
+	 * of 35 seconds, give us a margin. Thor devices can take longer in
+	 * error recovery, hence different value.
 	 */
-	if (set_timeout && sdev->request_queue->rq_timeout < (45 * HZ))
-		blk_queue_rq_timeout(sdev->request_queue, 45*HZ);
+	if (set_timeout) {
+		timeout = aac->sa_firmware ? AAC_SA_TIMEOUT : AAC_ARC_TIMEOUT;
+		blk_queue_rq_timeout(sdev->request_queue, timeout * HZ);
+	}
 
 	if (depth > 256)
 		depth = 256;
@@ -609,9 +612,13 @@ static struct device_attribute *aac_dev_attrs[] = {
 static int aac_ioctl(struct scsi_device *sdev, unsigned int cmd,
 		     void __user *arg)
 {
+	int retval;
 	struct aac_dev *dev = (struct aac_dev *)sdev->host->hostdata;
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
+	retval = aac_adapter_check_health(dev);
+	if (retval)
+		return -EBUSY;
 	return aac_do_ioctl(dev, cmd, arg);
 }
@@ -1586,6 +1593,19 @@ static void aac_init_char(void)
 	}
 }
 
+void aac_reinit_aif(struct aac_dev *aac, unsigned int index)
+{
+	/*
+	 * Firmware may send a AIF messages very early and the Driver may have
+	 * ignored as it is not fully ready to process the messages. Send
+	 * AIF to firmware so that if there are any unprocessed events they
+	 * can be processed now.
+	 */
+	if (aac_drivers[index].quirks & AAC_QUIRK_SRC)
+		aac_intr_normal(aac, 0, 2, 0, NULL);
+
+}
+
 static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	unsigned index = id->driver_data;
@@ -1683,6 +1703,8 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	mutex_init(&aac->scan_mutex);
 
 	INIT_DELAYED_WORK(&aac->safw_rescan_work, aac_safw_rescan_worker);
+	INIT_DELAYED_WORK(&aac->src_reinit_aif_worker,
+				aac_src_reinit_aif_worker);
 	/*
 	 *	Map in the registers from the adapter.
 	 */
@@ -1873,7 +1895,7 @@ static int aac_suspend(struct pci_dev *pdev, pm_message_t state)
 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
 
 	scsi_block_requests(shost);
-	aac_cancel_safw_rescan_worker(aac);
+	aac_cancel_rescan_worker(aac);
 	aac_send_shutdown(aac);
 
 	aac_release_resources(aac);
@@ -1932,7 +1954,7 @@ static void aac_remove_one(struct pci_dev *pdev)
 	struct Scsi_Host *shost = pci_get_drvdata(pdev);
 	struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
 
-	aac_cancel_safw_rescan_worker(aac);
+	aac_cancel_rescan_worker(aac);
 	scsi_remove_host(shost);
 
 	__aac_shutdown(aac);
@@ -1990,7 +2012,7 @@ static pci_ers_result_t aac_pci_error_detected(struct pci_dev *pdev,
 	aac->handle_pci_error = 1;
 
 	scsi_block_requests(aac->scsi_host_ptr);
-	aac_cancel_safw_rescan_worker(aac);
+	aac_cancel_rescan_worker(aac);
 	aac_flush_ios(aac);
 	aac_release_resources(aac);
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b66e06726c8..787ec9baebb0 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -733,10 +733,20 @@ static bool aac_is_ctrl_up_and_running(struct aac_dev *dev)
 	return ctrl_up;
 }
 
+static void aac_src_drop_io(struct aac_dev *dev)
+{
+	if (!dev->soft_reset_support)
+		return;
+
+	aac_adapter_sync_cmd(dev, DROP_IO, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+}
+
 static void aac_notify_fw_of_iop_reset(struct aac_dev *dev)
 {
 	aac_adapter_sync_cmd(dev, IOP_RESET_ALWAYS, 0, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL);
+	aac_src_drop_io(dev);
 }
 
 static void aac_send_iop_reset(struct aac_dev *dev)
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 8466aa784ec1..8b891a05d9e7 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -293,7 +293,7 @@ ahd_linux_pci_reserve_mem_region(struct ahd_softc *ahd,
 		if (!request_mem_region(start, 0x1000, "aic79xx"))
 			error = ENOMEM;
 		if (!error) {
-			*maddr = ioremap_nocache(base_page, base_offset + 512);
+			*maddr = ioremap(base_page, base_offset + 512);
 			if (*maddr == NULL) {
 				error = ENOMEM;
 				release_mem_region(start, 0x1000);
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index a9d40d3b90ef..4190a025381a 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -2314,7 +2314,7 @@ ahc_find_syncrate(struct ahc_softc *ahc, u_int *period,
 			 * At some speeds, we only support
 			 * ST transfers.
 			 */
-		 	if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
+			if ((syncrate->sxfr_u2 & ST_SXFR) != 0)
 				*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
 			break;
 		}
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
index 717d8d1082ce..9b293b1f0b71 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm_pci.c
@@ -372,7 +372,7 @@ ahc_linux_pci_reserve_mem_region(struct ahc_softc *ahc,
 		if (!request_mem_region(start, 0x1000, "aic7xxx"))
 			error = ENOMEM;
 		if (error == 0) {
-			*maddr = ioremap_nocache(start, 256);
+			*maddr = ioremap(start, 256);
 			if (*maddr == NULL) {
 				error = ENOMEM;
 				release_mem_region(start, 0x1000);
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 261d8e495fed..d022407e5645 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -54,6 +54,9 @@ static struct scsi_host_template aic94xx_sht = {
 	.eh_target_reset_handler	= sas_eh_target_reset_handler,
 	.target_destroy		= sas_target_destroy,
 	.ioctl			= sas_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl		= sas_ioctl,
+#endif
 	.track_queue_depth	= 1,
 };
 
@@ -565,8 +568,7 @@ static void asd_destroy_ha_caches(struct asd_ha_struct *asd_ha)
 	if (asd_ha->hw_prof.scb_ext)
 		asd_free_coherent(asd_ha, asd_ha->hw_prof.scb_ext);
 
-	if (asd_ha->hw_prof.ddb_bitmap)
-		kfree(asd_ha->hw_prof.ddb_bitmap);
+	kfree(asd_ha->hw_prof.ddb_bitmap);
 	asd_ha->hw_prof.ddb_bitmap = NULL;
 
 	for (i = 0; i < ASD_MAX_PHYS; i++) {
@@ -641,12 +643,10 @@ Err:
 
 static void asd_destroy_global_caches(void)
 {
-	if (asd_dma_token_cache)
-		kmem_cache_destroy(asd_dma_token_cache);
+	kmem_cache_destroy(asd_dma_token_cache);
 	asd_dma_token_cache = NULL;
 
-	if (asd_ascb_cache)
-		kmem_cache_destroy(asd_ascb_cache);
+	kmem_cache_destroy(asd_ascb_cache);
 	asd_ascb_cache = NULL;
 }
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 88053b15c363..40dc8eac0e3a 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -270,7 +270,7 @@ static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
 		break;
 	}
 	case ACB_ADAPTER_TYPE_C:{
-		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+		acb->pmuC = ioremap(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
 		if (!acb->pmuC) {
 			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n", acb->host->host_no);
 			return false;
@@ -1400,7 +1400,7 @@ static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
 			, pCCB->acb
 			, pCCB->startdone
 			, atomic_read(&acb->ccboutstandingcount));
-		return;
+			return;
 	}
 	arcmsr_report_ccb_state(acb, pCCB, error);
 }
@@ -3476,8 +3476,8 @@ polling_hbc_ccb_retry:
 				, pCCB->pcmd->device->id
 				, (u32)pCCB->pcmd->device->lun
 				, pCCB);
-			pCCB->pcmd->result = DID_ABORT << 16;
-			arcmsr_ccb_complete(pCCB);
+				pCCB->pcmd->result = DID_ABORT << 16;
+				arcmsr_ccb_complete(pCCB);
 			continue;
 		}
 		printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index d12dd89538df..ddb52e7ba622 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -1067,7 +1067,7 @@ void acornscsi_dma_setup(AS_Host *host, dmadir_t direction)
 * Purpose : ensure that all DMA transfers are up-to-date & host->scsi.SCp is correct
 * Params  : host - host to finish
 * Notes   : This is called when a command is:
- *		terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONECT
+ *		terminating, RESTORE_POINTERS, SAVE_POINTERS, DISCONNECT
 *	    : This must not return until all transfers are completed.
 */
 static
@@ -1816,7 +1816,7 @@ int acornscsi_reconnect(AS_Host *host)
 }
 
 /*
- * Function: int acornscsi_reconect_finish(AS_Host *host)
+ * Function: int acornscsi_reconnect_finish(AS_Host *host)
 * Purpose : finish reconnecting a command
 * Params  : host - host to complete
 * Returns : 0 if failed
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c
index e809493d0d06..a82b63a66635 100644
--- a/drivers/scsi/atari_scsi.c
+++ b/drivers/scsi/atari_scsi.c
@@ -742,7 +742,7 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
 		atari_scsi_template.sg_tablesize = SG_ALL;
 	} else {
 		atari_scsi_template.can_queue = 1;
-		atari_scsi_template.sg_tablesize = SG_NONE;
+		atari_scsi_template.sg_tablesize = 1;
 	}
 
 	if (setup_can_queue > 0)
@@ -751,8 +751,8 @@ static int __init atari_scsi_probe(struct platform_device *pdev)
 	if (setup_cmd_per_lun > 0)
 		atari_scsi_template.cmd_per_lun = setup_cmd_per_lun;
 
-	/* Leave sg_tablesize at 0 on a Falcon! */
-	if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize >= 0)
+	/* Don't increase sg_tablesize on Falcon! */
+	if (ATARIHW_PRESENT(TT_SCSI) && setup_sg_tablesize > 0)
 		atari_scsi_template.sg_tablesize = setup_sg_tablesize;
 
 	if (setup_hostid >= 0) {
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index e41f0bbdc9fd..c6a752309dda 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -1680,7 +1680,7 @@ static struct scsi_host_template atp870u_template = {
 	.bios_param		= atp870u_biosparam	/* biosparm */,
 	.can_queue		= qcnt			/* can_queue */,
 	.this_id		= 7			/* SCSI ID */,
-	.sg_tablesize		= ATP870U_SCATTER	/*SG_ALL*/ /*SG_NONE*/,
+	.sg_tablesize		= ATP870U_SCATTER	/*SG_ALL*/,
 	.max_sectors		= ATP870U_MAX_SECTORS,
 };
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 063dccc18f70..5f9f0b18ddf3 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1300,7 +1300,7 @@ struct be_cmd_get_port_name {
 /* Returns the number of items in the field array. */
 #define BE_NUMBER_OF_FIELD(_type_, _field_)	\
-	(FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
+	(sizeof_field(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
 
 /**
 * Different types of iSCSI completions to host driver for both initiator
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 0760d0bd8a10..9b81cfbbc5c5 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -453,14 +453,14 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
 	u8 __iomem *addr;
 	int pcicfg_reg;
 
-	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
+	addr = ioremap(pci_resource_start(pcidev, 2),
 			       pci_resource_len(pcidev, 2));
 	if (addr == NULL)
 		return -ENOMEM;
 	phba->ctrl.csr = addr;
 	phba->csr_va = addr;
 
-	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
+	addr = ioremap(pci_resource_start(pcidev, 4), 128 * 1024);
 	if (addr == NULL)
 		goto pci_map_err;
 	phba->ctrl.db = addr;
@@ -471,7 +471,7 @@ static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
 	else
 		pcicfg_reg = 0;
 
-	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
+	addr = ioremap(pci_resource_start(pcidev, pcicfg_reg),
 			       pci_resource_len(pcidev, pcicfg_reg));
 
 	if (addr == NULL)
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 2f9213b257a4..eb0c76338295 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1487,8 +1487,7 @@ bfad_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 	return ret;
 }
 
-int
-restart_bfa(struct bfad_s *bfad)
+static int restart_bfa(struct bfad_s *bfad)
 {
 	unsigned long flags;
 	struct pci_dev *pdev = bfad->pcidev;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 29ab81df75c0..fbfce02e5b93 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -275,8 +275,10 @@ bfad_im_get_stats(struct Scsi_Host *shost)
 	rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
 				fcstats, bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-	if (rc != BFA_STATUS_OK)
+	if (rc != BFA_STATUS_OK) {
+		kfree(fcstats);
 		return NULL;
+	}
 
 	wait_for_completion(&fcomp.comp);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index b2014cb96f58..22f06be2606f 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -536,7 +536,7 @@ bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
 			struct device *dev)
 {
 	struct bfad_im_port_pointer *im_portp;
-	int error = 1;
+	int error;
 
 	mutex_lock(&bfad_mutex);
 	error = idr_alloc(&bfad_im_port_index, im_port, 0, 0, GFP_KERNEL);
diff --git a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
index e4469df9c469..698f5ebaa0c2 100644
--- a/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
+++ b/drivers/scsi/bnx2fc/57xx_hsi_bnx2fc.h
@@ -813,7 +813,7 @@ struct fcoe_confqe {
 
 /*
- * FCoE conection data base
+ * FCoE connection data base
 */
 struct fcoe_conn_db {
 #if defined(__BIG_ENDIAN)
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 7796799bf04a..b4bfab5edf8f 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -346,7 +346,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 			return -ENOMEM;
 		}
 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-		cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
+		cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag);
 	} else {
 		cp = skb_put(skb, tlen);
 	}
@@ -428,7 +428,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 	struct fc_lport *lport;
 	struct bnx2fc_interface *interface;
 	struct fcoe_ctlr *ctlr;
-	struct fc_frame_header *fh;
 	struct fcoe_rcv_info *fr;
 	struct fcoe_percpu_s *bg;
 	struct sk_buff *tmp_skb;
@@ -463,7 +462,6 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
 		goto err;
 
 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
-	fh = (struct fc_frame_header *) skb_transport_header(skb);
 
 	fr = fcoe_dev_from_skb(skb);
 	fr->fr_dev = lport;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 747f019fb393..6f8335ddb1f2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -633,7 +633,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 	u16 xid;
 	u32 frame_len, len;
 	struct bnx2fc_cmd *io_req = NULL;
-	struct fcoe_task_ctx_entry *task, *task_page;
 	struct bnx2fc_interface *interface = tgt->port->priv;
 	struct bnx2fc_hba *hba = interface->hba;
 	int task_idx, index;
@@ -711,9 +710,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
 		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 		index = xid % BNX2FC_TASKS_PER_PAGE;
-		task_page = (struct fcoe_task_ctx_entry *)
-					hba->task_ctx[task_idx];
-		task = &(task_page[index]);
 		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 
 		if (!io_req)
@@ -839,9 +835,6 @@ ret_err_rqe:
 		task_idx = xid / BNX2FC_TASKS_PER_PAGE;
 		index = xid % BNX2FC_TASKS_PER_PAGE;
-		task_page = (struct fcoe_task_ctx_entry *)
-			     interface->hba->task_ctx[task_idx];
-		task = &(task_page[index]);
 		io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
 		if (!io_req)
 			goto ret_warn_rqe;
@@ -1122,7 +1115,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
 					struct fcoe_kcqe *ofld_kcqe)
 {
 	struct bnx2fc_rport		*tgt;
-	struct fcoe_port		*port;
 	struct bnx2fc_interface		*interface;
 	u32 conn_id;
 	u32 context_id;
@@ -1136,7 +1128,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
 	}
 	BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
 		ofld_kcqe->fcoe_conn_context_id);
-	port = tgt->port;
 	interface = tgt->port->priv;
 	if (hba != interface->hba) {
 		printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
@@ -1423,7 +1414,7 @@ int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
 
 	reg_base = pci_resource_start(hba->pcidev, BNX2X_DOORBELL_PCI_BAR);
 	reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
-	tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+	tgt->ctx_base = ioremap(reg_base + reg_off, 4);
 	if (!tgt->ctx_base)
 		return -ENOMEM;
 	return 0;
@@ -1463,10 +1454,7 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
 {
 	struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
 	struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
-	struct bnx2fc_interface *interface = tgt->port->priv;
 	struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
-	struct fcoe_task_ctx_entry *orig_task;
-	struct fcoe_task_ctx_entry *task_page;
 	struct fcoe_ext_mul_sges_ctx *sgl;
 	u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
 	u8 orig_task_type;
@@ -1528,10 +1516,6 @@ void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
 		orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE;
 		index = orig_xid % BNX2FC_TASKS_PER_PAGE;
-		task_page = (struct fcoe_task_ctx_entry *)
-			     interface->hba->task_ctx[orig_task_idx];
-		orig_task = &(task_page[index]);
-
 		/* Multiple SGEs were used for this IO */
 		sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
 		sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 9e50e5b53763..4c8122a82322 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -930,7 +930,6 @@ abts_err:
 int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 				enum fc_rctl r_ctl)
 {
-	struct fc_lport *lport;
 	struct bnx2fc_rport *tgt = orig_io_req->tgt;
 	struct bnx2fc_interface *interface;
 	struct fcoe_port *port;
@@ -948,7 +947,6 @@ int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 
 	port = orig_io_req->port;
 	interface = port->priv;
-	lport = port->lport;
 
 	cb_arg = kzalloc(sizeof(struct bnx2fc_els_cb_arg), GFP_ATOMIC);
 	if (!cb_arg) {
@@ -999,7 +997,6 @@ cleanup_err:
 int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
 {
-	struct fc_lport *lport;
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct bnx2fc_interface *interface;
 	struct fcoe_port *port;
@@ -1015,7 +1012,6 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
 
 	port = io_req->port;
 	interface = port->priv;
-	lport = port->lport;
 
 	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
 	if (!cleanup_io_req) {
@@ -1246,7 +1242,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
 	/* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
 	time_left = wait_for_completion_timeout(&io_req->abts_done,
-						(2 * rp->r_a_tov + 1) * HZ);
+					msecs_to_jiffies(2 * rp->r_a_tov + 1));
 	if (time_left)
 		BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for abts_done");
@@ -1927,8 +1923,7 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 	struct fcoe_fcp_rsp_payload *fcp_rsp;
 	struct bnx2fc_rport *tgt = io_req->tgt;
 	struct scsi_cmnd *sc_cmd;
-	struct Scsi_Host *host;
-
+	u16 scope = 0, qualifier = 0;
 
 	/* scsi_cmd_cmpl is called with tgt lock held */
@@ -1957,7 +1952,6 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 	/* parse fcp_rsp and obtain sense data from RQ if available */
 	bnx2fc_parse_fcp_rsp(io_req, fcp_rsp, num_rq);
 
-	host = sc_cmd->device->host;
 	if (!sc_cmd->SCp.ptr) {
 		printk(KERN_ERR PFX "SCp.ptr is NULL\n");
 		return;
@@ -1997,12 +1991,30 @@ void bnx2fc_process_scsi_cmd_compl(struct bnx2fc_cmd *io_req,
 
 			if (io_req->cdb_status == SAM_STAT_TASK_SET_FULL ||
 			    io_req->cdb_status == SAM_STAT_BUSY) {
-				/* Set the jiffies + retry_delay_timer * 100ms
-				   for the rport/tgt */
-				tgt->retry_delay_timestamp = jiffies +
-					fcp_rsp->retry_delay_timer * HZ / 10;
+				/* Newer array firmware with BUSY or
+				 * TASK_SET_FULL may return a status that needs
+				 * the scope bits masked.
+				 * Or a huge delay timestamp up to 27 minutes
+				 * can result.
+				 */
+				if (fcp_rsp->retry_delay_timer) {
+					/* Upper 2 bits */
+					scope = fcp_rsp->retry_delay_timer
+						& 0xC000;
+					/* Lower 14 bits */
+					qualifier = fcp_rsp->retry_delay_timer
+						& 0x3FFF;
+				}
+				if (scope > 0 && qualifier > 0 &&
+					qualifier <= 0x3FEF) {
+					/* Set the jiffies +
+					 * retry_delay_timer * 100ms
+					 * for the rport/tgt
+					 */
+					tgt->retry_delay_timestamp = jiffies +
+						(qualifier * HZ / 10);
+				}
 			}
-
 		}
 		if (io_req->fcp_resid)
 			scsi_set_resid(sc_cmd, io_req->fcp_resid);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 12666313b937..e53ebc5eff85 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2715,7 +2715,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
 		reg_base = pci_resource_start(ep->hba->pcidev,
 					      BNX2X_DOORBELL_PCI_BAR);
 		reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
-		ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+		ep->qp.ctx_base = ioremap(reg_base + reg_off, 4);
 		if (!ep->qp.ctx_base)
 			return -ENOMEM;
 		goto arm_cq;
@@ -2736,7 +2736,7 @@ int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
 		/* 5709 device in normal node and 5706/5708 devices */
 		reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
 
-	ep->qp.ctx_base = ioremap_nocache(ep->hba->reg_base + reg_off,
+	ep->qp.ctx_base = ioremap(ep->hba->reg_base + reg_off,
 					  MB_KERNEL_CTX_SIZE);
 	if (!ep->qp.ctx_base)
 		return -ENOMEM;
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index c5fa5f3b00e9..0b28d44d3573 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -915,12 +915,12 @@ void bnx2i_free_hba(struct bnx2i_hba *hba)
 	INIT_LIST_HEAD(&hba->ep_ofld_list);
 	INIT_LIST_HEAD(&hba->ep_active_list);
 	INIT_LIST_HEAD(&hba->ep_destroy_list);
-	pci_dev_put(hba->pcidev);
 
 	if (hba->regview) {
 		pci_iounmap(hba->pcidev, hba->regview);
 		hba->regview = NULL;
 	}
+	pci_dev_put(hba->pcidev);
 	bnx2i_free_mp_bdt(hba);
 	bnx2i_release_free_cid_que(hba);
 	iscsi_host_free(shost);
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 5f8153c37f77..ed5f4a6ae270 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -579,7 +579,6 @@ ch_release(struct inode *inode, struct file *file)
 	scsi_changer *ch = file->private_data;
 
 	scsi_device_put(ch->device);
-	ch->device = NULL;
 	file->private_data = NULL;
 	kref_put(&ch->ref, ch_destroy);
 	return 0;
@@ -873,6 +872,10 @@ static long ch_ioctl_compat(struct file * file,
 			    unsigned int cmd, unsigned long arg)
 {
 	scsi_changer *ch = file->private_data;
+	int retval = scsi_ioctl_block_when_processing_errors(ch->device, cmd,
+					file->f_flags & O_NDELAY);
+	if (retval)
+		return retval;
 
 	switch (cmd) {
 	case CHIOGPARAMS:
@@ -884,7 +887,7 @@ static long ch_ioctl_compat(struct file * file,
 	case CHIOINITELEM:
 	case CHIOSVOLTAG:
 		/* compatible */
-		return ch_ioctl(file, cmd, arg);
+		return ch_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
 	case CHIOGSTATUS32:
 	{
 		struct changer_element_status32 ces32;
@@ -899,8 +902,7 @@ static long ch_ioctl_compat(struct file * file,
 		return ch_gstatus(ch, ces32.ces_type, data);
 	}
 	default:
-		// return scsi_ioctl_compat(ch->device, cmd, (void*)arg);
-		return -ENOIOCTLCMD;
+		return scsi_compat_ioctl(ch->device, cmd, compat_ptr(arg));
 	}
 }
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index e51923886475..950f9cdf0577 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -793,10 +793,10 @@ csio_hw_get_flash_params(struct csio_hw *hw)
 			goto found;
 	}
 
The code below looks repetative with + /* Decode Flash part size. The code below looks repetitive with * common encodings, but that's not guaranteed in the JEDEC - * specification for the Read JADEC ID command. The only thing that - * we're guaranteed by the JADEC specification is where the + * specification for the Read JEDEC ID command. The only thing that + * we're guaranteed by the JEDEC specification is where the * Manufacturer ID is in the returned result. After that each * Manufacturer ~could~ encode things completely differently. * Note, all Flash parts must have 64KB sectors. @@ -983,8 +983,8 @@ retry: waiting -= 50; /* - * If neither Error nor Initialialized are indicated - * by the firmware keep waiting till we exaust our + * If neither Error nor Initialized are indicated + * by the firmware keep waiting till we exhaust our * timeout ... and then retry if we haven't exhausted * our retries ... */ @@ -1738,7 +1738,7 @@ static void csio_link_l1cfg(struct link_config *lc, uint16_t fw_caps, * Convert Common Code Forward Error Control settings into the * Firmware's API. If the current Requested FEC has "Automatic" * (IEEE 802.3) specified, then we use whatever the Firmware - * sent us as part of it's IEEE 802.3-based interpratation of + * sent us as part of it's IEEE 802.3-based interpretation of * the Transceiver Module EPROM FEC parameters. Otherwise we * use whatever is in the current Requested FEC settings. */ @@ -2834,7 +2834,7 @@ csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt) } /* - * csio_hws_initializing - Initialiazing state + * csio_hws_initializing - Initializing state * @hw - HW module * @evt - Event * @@ -3049,7 +3049,7 @@ csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt) if (!csio_is_hw_master(hw)) break; /* - * The BYE should have alerady been issued, so we cant + * The BYE should have already been issued, so we can't * use the mailbox interface. Hence we use the PL_RST * register directly. */ @@ -3104,7 +3104,7 @@ csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt) * * A table driven interrupt handler that applies a set of masks to an * interrupt status word and performs the corresponding actions if the - * interrupts described by the mask have occured. The actions include + * interrupts described by the mask have occurred. The actions include * optionally emitting a warning or alert message. The table is terminated * by an entry specifying mask 0. Returns the number of fatal interrupt * conditions. @@ -4219,7 +4219,7 @@ csio_mgmtm_exit(struct csio_mgmtm *mgmtm) * @hw: Pointer to HW module. * * It is assumed that the initialization is a synchronous operation. - * So when we return afer posting the event, the HW SM should be in + * So when we return after posting the event, the HW SM should be in * the ready state, if there were no errors during init. */ int diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c index a6dd704d7f2d..8dea7d53788a 100644 --- a/drivers/scsi/csiostor/csio_init.c +++ b/drivers/scsi/csiostor/csio_init.c @@ -154,13 +154,10 @@ csio_dfs_create(struct csio_hw *hw) /* * csio_dfs_destroy - Destroys per-hw debugfs. 
*/ -static int +static void csio_dfs_destroy(struct csio_hw *hw) { - if (hw->debugfs_root) - debugfs_remove_recursive(hw->debugfs_root); - - return 0; + debugfs_remove_recursive(hw->debugfs_root); } /* @@ -532,7 +529,7 @@ static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev) goto err_free_hw; /* Get the start address of registers from BAR 0 */ - hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0), + hw->regstart = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); if (!hw->regstart) { csio_err(hw, "Could not map BAR 0, regstart = %p\n", diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c index 66e58f0a75dc..74ff8adc41f7 100644 --- a/drivers/scsi/csiostor/csio_lnode.c +++ b/drivers/scsi/csiostor/csio_lnode.c @@ -301,6 +301,7 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) struct fc_fdmi_port_name *port_name; uint8_t buf[64]; uint8_t *fc4_type; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n", @@ -385,13 +386,13 @@ csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) len = (uint32_t)(pld - (uint8_t *)cmd); /* Submit FDMI RPA request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /* @@ -412,6 +413,7 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) struct fc_fdmi_rpl *reg_pl; struct fs_fdmi_attrs *attrib_blk; uint8_t buf[64]; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n", @@ -491,13 +493,13 @@ csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) attrib_blk->numattrs = htonl(numattrs); /* Submit FDMI RHBA request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /* @@ -512,6 +514,7 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) void *cmd; struct fc_fdmi_port_name *port_name; uint32_t len; + unsigned long flags; if (fdmi_req->wr_status != FW_SUCCESS) { csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n", @@ -542,13 +545,13 @@ csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req) len += sizeof(*port_name); /* Submit FDMI request */ - spin_lock_irq(&hw->lock); + spin_lock_irqsave(&hw->lock, flags); if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn, FCOE_CT, &fdmi_req->dma_buf, len)) { CSIO_INC_STATS(ln, n_fdmi_err); csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n"); } - spin_unlock_irq(&hw->lock); + spin_unlock_irqrestore(&hw->lock, flags); } /** @@ -1989,7 +1992,7 @@ static int csio_ln_init(struct csio_lnode *ln) { int rv = -EINVAL; - struct csio_lnode *rln, *pln; + struct csio_lnode *pln; struct csio_hw *hw = csio_lnode_to_hw(ln); csio_init_state(&ln->sm, csio_lns_uninit); @@ -2019,7 +2022,6 @@ csio_ln_init(struct csio_lnode *ln) * THe rest is common for non-root physical and NPIV lnodes. 
* Just get references to all other modules */ - rln = csio_root_lnode(ln); if (csio_is_npiv_ln(ln)) { /* NPIV */ diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index 6f13673d6aa0..94810b19e747 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c @@ -1210,7 +1210,7 @@ csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp) !csio_is_hw_intr_enabled(hw)) { csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n", *((uint8_t *)mbp->mb)); - goto error_out; + goto error_out; } if (mbm->mcurrent != NULL) { diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c index 469d0bc9f5fe..00cf33573136 100644 --- a/drivers/scsi/csiostor/csio_scsi.c +++ b/drivers/scsi/csiostor/csio_scsi.c @@ -1383,7 +1383,7 @@ csio_device_reset(struct device *dev, return -EINVAL; /* Delete NPIV lnodes */ - csio_lnodes_exit(hw, 1); + csio_lnodes_exit(hw, 1); /* Block upper IOs */ csio_lnodes_block_request(hw); diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c index 03bd896cdbb9..0ca695110f54 100644 --- a/drivers/scsi/csiostor/csio_wr.c +++ b/drivers/scsi/csiostor/csio_wr.c @@ -1316,7 +1316,6 @@ csio_wr_fixup_host_params(struct csio_hw *hw) u32 fl_align = clsz < 32 ? 32 : clsz; u32 pack_align; u32 ingpad, ingpack; - int pcie_cap; csio_wr_reg32(hw, HOSTPAGESIZEPF0_V(s_hps) | HOSTPAGESIZEPF1_V(s_hps) | HOSTPAGESIZEPF2_V(s_hps) | HOSTPAGESIZEPF3_V(s_hps) | @@ -1347,8 +1346,7 @@ csio_wr_fixup_host_params(struct csio_hw *hw) * multiple of the Maximum Payload Size. */ pack_align = fl_align; - pcie_cap = pci_find_capability(hw->pdev, PCI_CAP_ID_EXP); - if (pcie_cap) { + if (pci_is_pcie(hw->pdev)) { u32 mps, mps_log; u16 devctl; @@ -1356,9 +1354,7 @@ csio_wr_fixup_host_params(struct csio_hw *hw) * [bits 7:5] encodes sizes as powers of 2 starting at * 128 bytes. 
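 *
 * (Editor's sketch, not patch text: with the helper the hunk below
 * switches to, the whole decode is
 *	u16 devctl;
 *	pcie_capability_read_word(pdev, PCI_EXP_DEVCTL, &devctl);
 *	mps = 128 << ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
 * which matches the mps_log arithmetic kept in this function;
 * "pdev" here stands in for hw->pdev.)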
*/ - pci_read_config_word(hw->pdev, - pcie_cap + PCI_EXP_DEVCTL, - &devctl); + pcie_capability_read_word(hw->pdev, PCI_EXP_DEVCTL, &devctl); mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7; mps = 1 << mps_log; if (mps > pack_align) diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index da50e87921bc..bc1086ae6835 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -2073,7 +2073,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev) struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev); struct net_device *ndev = cdev->ports[0]; struct cxgbi_tag_format tformat; - unsigned int ppmax; int i, err; if (!lldi->vr->iscsi.size) { @@ -2082,7 +2081,6 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev) } cdev->flags |= CXGBI_FLAG_USE_PPOD_OFLDQ; - ppmax = lldi->vr->iscsi.size >> PPOD_SIZE_SHIFT; memset(&tformat, 0, sizeof(struct cxgbi_tag_format)); for (i = 0; i < 4; i++) diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 3e17af8aedeb..4bc794d2f51c 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -121,7 +121,8 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev) "cdev 0x%p, p# %u.\n", cdev, cdev->nports); cxgbi_hbas_remove(cdev); cxgbi_device_portmap_cleanup(cdev); - cxgbi_ppm_release(cdev->cdev2ppm(cdev)); + if (cdev->cdev2ppm) + cxgbi_ppm_release(cdev->cdev2ppm(cdev)); if (cdev->pmap.max_connect) cxgbi_free_big_mem(cdev->pmap.port_csk); kfree(cdev); @@ -2284,34 +2285,6 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn, } EXPORT_SYMBOL_GPL(cxgbi_set_conn_param); -static inline int csk_print_port(struct cxgbi_sock *csk, char *buf) -{ - int len; - - cxgbi_sock_get(csk); - len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port)); - cxgbi_sock_put(csk); - - return len; -} - -static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf) -{ - int len; - - cxgbi_sock_get(csk); - if (csk->csk_family == AF_INET) - len = sprintf(buf, "%pI4", - &csk->daddr.sin_addr.s_addr); - else - len = sprintf(buf, "%pI6", - &csk->daddr6.sin6_addr); - - cxgbi_sock_put(csk); - - return len; -} - int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param, char *buf) { @@ -2774,7 +2747,7 @@ static int __init libcxgbi_init_module(void) { pr_info("%s", version); - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) < + BUILD_BUG_ON(sizeof_field(struct sk_buff, cb) < sizeof(struct cxgbi_skb_cb)); return 0; } diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index b1f4724efde2..fbd2ae40dab4 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -44,14 +44,12 @@ static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp) struct afu *afu = cmd->parent; struct cxlflash_cfg *cfg = afu->parent; struct device *dev = &cfg->dev->dev; - struct sisl_ioarcb *ioarcb; struct sisl_ioasa *ioasa; u32 resid; if (unlikely(!cmd)) return; - ioarcb = &(cmd->rcb); ioasa = &(cmd->sa); if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) { @@ -753,10 +751,13 @@ static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level, /* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */ if (index == PRIMARY_HWQ) cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq); + /* fall through */ case UNMAP_TWO: cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq); + /* fall through */ case UNMAP_ONE: cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq); + /* fall through */ case FREE_IRQ: cfg->ops->free_afu_irqs(hwq->ctx_cookie); /* 
fall through */ @@ -973,14 +974,18 @@ static void cxlflash_remove(struct pci_dev *pdev) switch (cfg->init_state) { case INIT_STATE_CDEV: cxlflash_release_chrdev(cfg); + /* fall through */ case INIT_STATE_SCSI: cxlflash_term_local_luns(cfg); scsi_remove_host(cfg->host); + /* fall through */ case INIT_STATE_AFU: term_afu(cfg); + /* fall through */ case INIT_STATE_PCI: cfg->ops->destroy_afu(cfg->afu_cookie); pci_disable_device(pdev); + /* fall through */ case INIT_STATE_NONE: free_mem(cfg); scsi_host_put(cfg->host); @@ -2353,11 +2358,11 @@ retry: cxlflash_schedule_async_reset(cfg); break; } - /* fall through to retry */ + /* fall through - to retry */ case -EAGAIN: if (++nretry < 2) goto retry; - /* fall through to exit */ + /* fall through - to exit */ default: break; } @@ -3017,6 +3022,7 @@ retry: wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); if (cfg->state == STATE_NORMAL) goto retry; + /* else, fall through */ default: /* Ideally should not happen */ dev_err(dev, "%s: Device is not ready, state=%d\n", @@ -3585,7 +3591,7 @@ static const struct file_operations cxlflash_chr_fops = { .owner = THIS_MODULE, .open = cxlflash_chr_open, .unlocked_ioctl = cxlflash_chr_ioctl, - .compat_ioctl = cxlflash_chr_ioctl, + .compat_ioctl = compat_ptr_ioctl, }; /** diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 4971104b1817..f32da0ca529e 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -512,6 +512,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) unsigned int tpg_desc_tbl_off; unsigned char orig_transition_tmo; unsigned long flags; + bool transitioning_sense = false; if (!pg->expiry) { unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ; @@ -572,13 +573,19 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) goto retry; } /* - * Retry on ALUA state transition or if any - * UNIT ATTENTION occurred. + * If the array returns with 'ALUA state transition' + * sense code here it cannot return RTPG data during + * transition. So set the state to 'transitioning' directly. */ if (sense_hdr.sense_key == NOT_READY && sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) - err = SCSI_DH_RETRY; - else if (sense_hdr.sense_key == UNIT_ATTENTION) + sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) { + transitioning_sense = true; + goto skip_rtpg; + } + /* + * Retry if any other UNIT ATTENTION occurred. 
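+ * (Editor's note, not in the original patch: NOT READY with ASC/ASCQ
+ * 04h/0Ah is the SPC "logical unit not accessible, asymmetric access
+ * state transition" sense, which is why it is special-cased above
+ * instead of being retried like other unit attentions.)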
+ */ + if (sense_hdr.sense_key == UNIT_ATTENTION) err = SCSI_DH_RETRY; if (err == SCSI_DH_RETRY && pg->expiry != 0 && time_before(jiffies, pg->expiry)) { @@ -666,7 +673,11 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg) off = 8 + (desc[7] * 4); } + skip_rtpg: spin_lock_irqsave(&pg->lock, flags); + if (transitioning_sense) + pg->state = SCSI_ACCESS_STATE_TRANSITIONING; + sdev_printk(KERN_INFO, sdev, "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n", ALUA_DH_NAME, pg->group_id, print_alua_state(pg->state), diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 65f1fe343c64..5efc959493ec 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work) spin_unlock(&ctlr->ms_lock); retry: + memset(cdb, 0, sizeof(cdb)); + data_size = rdac_failover_get(ctlr, &list, cdb); RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, " diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c index 7bd376d95ed5..b02ac389e6c6 100644 --- a/drivers/scsi/esas2r/esas2r_flash.c +++ b/drivers/scsi/esas2r/esas2r_flash.c @@ -1197,6 +1197,7 @@ bool esas2r_nvram_read_direct(struct esas2r_adapter *a) if (!esas2r_read_flash_block(a, a->nvram, FLS_OFFSET_NVR, sizeof(struct esas2r_sas_nvram))) { esas2r_hdebug("NVRAM read failed, using defaults"); + up(&a->nvram_semaphore); return false; } diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index 950cd92df2ff..eb7d139ffc00 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c @@ -762,14 +762,10 @@ u32 esas2r_get_uncached_size(struct esas2r_adapter *a) static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) { - int pcie_cap_reg; - - pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); - if (pcie_cap_reg) { + if (pci_is_pcie(a->pcid)) { u16 devcontrol; - pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, - &devcontrol); + pcie_capability_read_word(a->pcid, PCI_EXP_DEVCTL, &devcontrol); if ((devcontrol & PCI_EXP_DEVCTL_READRQ) > PCI_EXP_DEVCTL_READRQ_512B) { @@ -778,9 +774,8 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) devcontrol &= ~PCI_EXP_DEVCTL_READRQ; devcontrol |= PCI_EXP_DEVCTL_READRQ_512B; - pci_write_config_word(a->pcid, - pcie_cap_reg + PCI_EXP_DEVCTL, - devcontrol); + pcie_capability_write_word(a->pcid, PCI_EXP_DEVCTL, + devcontrol); } } } diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index 3d130523c288..442c5e70a7b4 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c @@ -757,7 +757,6 @@ static int hba_ioctl_callback(struct esas2r_adapter *a, struct atto_hba_get_adapter_info *gai = &hi->data.get_adap_info; - int pcie_cap_reg; if (hi->flags & HBAF_TUNNEL) { hi->status = ATTO_STS_UNSUPPORTED; @@ -784,17 +783,14 @@ static int hba_ioctl_callback(struct esas2r_adapter *a, gai->pci.dev_num = PCI_SLOT(a->pcid->devfn); gai->pci.func_num = PCI_FUNC(a->pcid->devfn); - pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); - if (pcie_cap_reg) { + if (pci_is_pcie(a->pcid)) { u16 stat; u32 caps; - pci_read_config_word(a->pcid, - pcie_cap_reg + PCI_EXP_LNKSTA, - &stat); - pci_read_config_dword(a->pcid, - pcie_cap_reg + PCI_EXP_LNKCAP, - &caps); + pcie_capability_read_word(a->pcid, PCI_EXP_LNKSTA, + &stat); + pcie_capability_read_dword(a->pcid, 
PCI_EXP_LNKCAP, + &caps); gai->pci.link_speed_curr = (u8)(stat & PCI_EXP_LNKSTA_CLS); diff --git a/drivers/scsi/esas2r/esas2r_main.c b/drivers/scsi/esas2r/esas2r_main.c index fdbda5c05aa0..7b49e2e9fcde 100644 --- a/drivers/scsi/esas2r/esas2r_main.c +++ b/drivers/scsi/esas2r/esas2r_main.c @@ -613,10 +613,17 @@ static int __init esas2r_init(void) /* Handle ioctl calls to "/proc/scsi/esas2r/ATTOnode" */ static const struct file_operations esas2r_proc_fops = { - .compat_ioctl = esas2r_proc_ioctl, + .compat_ioctl = compat_ptr_ioctl, .unlocked_ioctl = esas2r_proc_ioctl, }; +static const struct proc_ops esas2r_proc_ops = { + .proc_ioctl = esas2r_proc_ioctl, +#ifdef CONFIG_COMPAT + .proc_compat_ioctl = compat_ptr_ioctl, +#endif +}; + static struct Scsi_Host *esas2r_proc_host; static int esas2r_proc_major; @@ -728,7 +735,7 @@ const char *esas2r_info(struct Scsi_Host *sh) pde = proc_create(ATTONODE_NAME, 0, sh->hostt->proc_dir, - &esas2r_proc_fops); + &esas2r_proc_ops); if (!pde) { esas2r_log_dev(ESAS2R_LOG_WARN, diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index bb88995a12c7..89afa31e33cb 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -243,8 +243,6 @@ static void esp_set_all_config3(struct esp *esp, u8 val) /* Reset the ESP chip, _not_ the SCSI bus. */ static void esp_reset_esp(struct esp *esp) { - u8 family_code, version; - /* Now reset the ESP chip */ scsi_esp_cmd(esp, ESP_CMD_RC); scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); @@ -257,14 +255,19 @@ static void esp_reset_esp(struct esp *esp) */ esp->max_period = ((35 * esp->ccycle) / 1000); if (esp->rev == FAST) { - version = esp_read8(ESP_UID); - family_code = (version & 0xf8) >> 3; - if (family_code == 0x02) + u8 family_code = ESP_FAMILY(esp_read8(ESP_UID)); + + if (family_code == ESP_UID_F236) { esp->rev = FAS236; - else if (family_code == 0x0a) + } else if (family_code == ESP_UID_HME) { esp->rev = FASHME; /* Version is usually '5'. 
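 * (Editor's note: the family code compared in this if/else ladder
 * comes from UID register bits 7:3; the ESP_FAMILY() macro added to
 * esp_scsi.h below is just (uid & 0xf8) >> 3.)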
*/ - else + } else if (family_code == ESP_UID_FSC) { + esp->rev = FSC; + /* Enable Active Negation */ + esp_write8(ESP_CONFIG4_RADE, ESP_CFG4); + } else { esp->rev = FAS100A; + } esp->min_period = ((4 * esp->ccycle) / 1000); } else { esp->min_period = ((5 * esp->ccycle) / 1000); @@ -308,7 +311,7 @@ static void esp_reset_esp(struct esp *esp) case FAS236: case PCSCSI: - /* Fast 236, AM53c974 or HME */ + case FSC: esp_write8(esp->config2, ESP_CFG2); if (esp->rev == FASHME) { u8 cfg3 = esp->target[0].esp_config3; @@ -2373,10 +2376,11 @@ static const char *esp_chip_names[] = { "ESP100A", "ESP236", "FAS236", + "AM53C974", + "53CF9x-2", "FAS100A", "FAST", "FASHME", - "AM53C974", }; static struct scsi_transport_template *esp_transport_template; diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index 91b32f2a1a1b..446a3d18c022 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -78,12 +78,14 @@ #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ -/* ESP config register 4 read-write, found only on am53c974 chips */ -#define ESP_CONFIG4_RADE 0x04 /* Active negation */ -#define ESP_CONFIG4_RAE 0x08 /* Active negation on REQ and ACK */ -#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature */ -#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 */ -#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 */ +/* ESP config register 4 read-write */ +#define ESP_CONFIG4_BBTE 0x01 /* Back-to-back transfers (fsc) */ +#define ESP_CONFIG4_TEST 0x02 /* Transfer counter test mode (fsc) */ +#define ESP_CONFIG4_RADE 0x04 /* Active negation (am53c974/fsc) */ +#define ESP_CONFIG4_RAE 0x08 /* Act. negation REQ/ACK (am53c974) */ +#define ESP_CONFIG4_PWD 0x20 /* Reduced power feature (am53c974) */ +#define ESP_CONFIG4_GE0 0x40 /* Glitch eater bit 0 (am53c974) */ +#define ESP_CONFIG4_GE1 0x80 /* Glitch eater bit 1 (am53c974) */ #define ESP_CONFIG_GE_12NS (0) #define ESP_CONFIG_GE_25NS (ESP_CONFIG_GE1) @@ -209,10 +211,15 @@ #define ESP_TEST_TS 0x04 /* Tristate test mode */ /* ESP unique ID register read-only, found on fas236+fas100a only */ +#define ESP_UID_FAM 0xf8 /* ESP family bitmask */ + +#define ESP_FAMILY(uid) (((uid) & ESP_UID_FAM) >> 3) + +/* Values for the ESP family bits */ #define ESP_UID_F100A 0x00 /* ESP FAS100A */ #define ESP_UID_F236 0x02 /* ESP FAS236 */ -#define ESP_UID_REV 0x07 /* ESP revision */ -#define ESP_UID_FAM 0xf8 /* ESP family */ +#define ESP_UID_HME 0x0a /* FAS HME */ +#define ESP_UID_FSC 0x14 /* NCR/Symbios Logic 53CF9x-2 */ /* ESP fifo flags register read-only */ /* Note that the following implies a 16 byte FIFO on the ESP. */ @@ -257,15 +264,17 @@ struct esp_cmd_priv { }; #define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp)) +/* NOTE: this enum is ordered based on chip features! 
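 * (Editor's gloss, inferred from the header rather than stated by the
 * patch: keeping the values ordered by capability lets the driver
 * gate features with range checks such as "if (esp->rev <= ESP100A)",
 * which is why FSC is slotted in between PCSCSI and FAS100A instead
 * of being appended at the end.)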
*/ enum esp_rev { - ESP100 = 0x00, /* NCR53C90 - very broken */ - ESP100A = 0x01, /* NCR53C90A */ - ESP236 = 0x02, - FAS236 = 0x03, - FAS100A = 0x04, - FAST = 0x05, - FASHME = 0x06, - PCSCSI = 0x07, /* AM53c974 */ + ESP100, /* NCR53C90 - very broken */ + ESP100A, /* NCR53C90A */ + ESP236, + FAS236, + PCSCSI, /* AM53c974 */ + FSC, /* NCR/Symbios Logic 53CF9x-2 */ + FAS100A, + FAST, + FASHME, }; struct esp_cmd_entry { diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 00dd47bcbb1e..25dae9f0b205 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1250,15 +1250,21 @@ static int __init fcoe_if_init(void) /* attach to scsi transport */ fcoe_nport_scsi_transport = fc_attach_transport(&fcoe_nport_fc_functions); + if (!fcoe_nport_scsi_transport) + goto err; + fcoe_vport_scsi_transport = fc_attach_transport(&fcoe_vport_fc_functions); - - if (!fcoe_nport_scsi_transport) { - printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); - return -ENODEV; - } + if (!fcoe_vport_scsi_transport) + goto err_vport; return 0; + +err_vport: + fc_release_transport(fcoe_nport_scsi_transport); +err: + printk(KERN_ERR "fcoe: Failed to attach to the FC transport\n"); + return -ENODEV; } /** @@ -1522,8 +1528,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(skb_frag_page(frag)) - + frag->page_offset; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } @@ -1618,7 +1623,6 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, else fr_flags(fp) |= FCPHF_CRC_UNCHECKED; - fh = (struct fc_frame_header *) skb_transport_header(skb); fh = fc_frame_header_get(fp); if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && fh->fh_type == FC_TYPE_FCP) return 0; diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index ba4603d76284..a20ddc301c89 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -308,7 +308,7 @@ EXPORT_SYMBOL_GPL(fcoe_get_wwn); u32 fcoe_fc_crc(struct fc_frame *fp) { struct sk_buff *skb = fp_skb(fp); - struct skb_frag_struct *frag; + skb_frag_t *frag; unsigned char *data; unsigned long off, len, clen; u32 crc; @@ -318,7 +318,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp) for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; - off = frag->page_offset; + off = skb_frag_off(frag); len = skb_frag_size(frag); while (len > 0) { clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c index b5e66971b6d9..772bdc93930a 100644 --- a/drivers/scsi/fdomain.c +++ b/drivers/scsi/fdomain.c @@ -166,7 +166,7 @@ static int fdomain_test_loopback(int base) static void fdomain_reset(int base) { - outb(1, base + REG_BCTL); + outb(BCTL_RST, base + REG_BCTL); mdelay(20); outb(0, base + REG_BCTL); mdelay(1150); @@ -306,7 +306,7 @@ static void fdomain_work(struct work_struct *work) status = inb(fd->base + REG_BSTAT); if (status & BSTAT_REQ) { - switch (status & 0x0e) { + switch (status & (BSTAT_MSG | BSTAT_CMD | BSTAT_IO)) { case BSTAT_CMD: /* COMMAND OUT */ outb(cmd->cmnd[cmd->SCp.sent_command++], fd->base + REG_SCSI_DATA); @@ -331,7 +331,7 @@ static void fdomain_work(struct work_struct *work) case BSTAT_MSG | BSTAT_CMD: /* MESSAGE OUT */ outb(MESSAGE_REJECT, fd->base + REG_SCSI_DATA); break; - case BSTAT_MSG | BSTAT_IO | BSTAT_CMD: /* MESSAGE IN */ + case BSTAT_MSG | BSTAT_CMD | 
BSTAT_IO: /* MESSAGE IN */ cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA); if (!cmd->SCp.Message) ++done; diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c index 28639adf8219..f2da4fa382e8 100644 --- a/drivers/scsi/fdomain_isa.c +++ b/drivers/scsi/fdomain_isa.c @@ -131,8 +131,7 @@ static int fdomain_isa_match(struct device *dev, unsigned int ndev) if (!request_region(base, FDOMAIN_REGION_SIZE, "fdomain_isa")) return 0; - irq = irqs[(inb(base + REG_CFG1) & 0x0e) >> 1]; - + irq = irqs[(inb(base + REG_CFG1) & CFG1_IRQ_MASK) >> 1]; if (sig) this_id = sig->this_id; @@ -164,7 +163,7 @@ static int fdomain_isa_param_match(struct device *dev, unsigned int ndev) } if (irq_ <= 0) - irq_ = irqs[(inb(io[ndev] + REG_CFG1) & 0x0e) >> 1]; + irq_ = irqs[(inb(io[ndev] + REG_CFG1) & CFG1_IRQ_MASK) >> 1]; sh = fdomain_create(io[ndev], irq_, scsi_id[ndev], dev); if (!sh) { diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c index 21991c99db7c..13f7d88d6e57 100644 --- a/drivers/scsi/fnic/fnic_debugfs.c +++ b/drivers/scsi/fnic/fnic_debugfs.c @@ -52,7 +52,6 @@ static struct fc_trace_flag_type *fc_trc_flag; */ int fnic_debugfs_init(void) { - int rc = -1; fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL); fnic_stats_debugfs_root = debugfs_create_dir("statistics", @@ -70,8 +69,7 @@ int fnic_debugfs_init(void) fc_trc_flag->fc_clear = 4; } - rc = 0; - return rc; + return 0; } /* diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c index 911a5adc289c..673887e383cc 100644 --- a/drivers/scsi/fnic/fnic_fcs.c +++ b/drivers/scsi/fnic/fnic_fcs.c @@ -52,6 +52,7 @@ void fnic_handle_link(struct work_struct *work) unsigned long flags; int old_link_status; u32 old_link_down_cnt; + u64 old_port_speed, new_port_speed; spin_lock_irqsave(&fnic->fnic_lock, flags); @@ -62,14 +63,19 @@ void fnic_handle_link(struct work_struct *work) old_link_down_cnt = fnic->link_down_cnt; old_link_status = fnic->link_status; + old_port_speed = atomic64_read( + &fnic->fnic_stats.misc_stats.current_port_speed); + fnic->link_status = vnic_dev_link_status(fnic->vdev); fnic->link_down_cnt = vnic_dev_link_down_cnt(fnic->vdev); + new_port_speed = vnic_dev_port_speed(fnic->vdev); atomic64_set(&fnic->fnic_stats.misc_stats.current_port_speed, - vnic_dev_port_speed(fnic->vdev)); - shost_printk(KERN_INFO, fnic->lport->host, "Current vnic speed set to : %llu\n", - (u64)atomic64_read( - &fnic->fnic_stats.misc_stats.current_port_speed)); + new_port_speed); + if (old_port_speed != new_port_speed) + shost_printk(KERN_INFO, fnic->lport->host, + "Current vnic speed set to : %llu\n", + new_port_speed); switch (vnic_dev_port_speed(fnic->vdev)) { case DCEM_PORTSPEED_10G: diff --git a/drivers/scsi/fnic/fnic_isr.c b/drivers/scsi/fnic/fnic_isr.c index da4602b63495..2fb2731f50fb 100644 --- a/drivers/scsi/fnic/fnic_isr.c +++ b/drivers/scsi/fnic/fnic_isr.c @@ -254,7 +254,7 @@ int fnic_set_intr_mode(struct fnic *fnic) int vecs = n + m + o + 1; if (pci_alloc_irq_vectors(fnic->pdev, vecs, vecs, - PCI_IRQ_MSIX) < 0) { + PCI_IRQ_MSIX) == vecs) { fnic->rq_count = n; fnic->raw_wq_count = m; fnic->wq_copy_count = o; @@ -280,7 +280,7 @@ int fnic_set_intr_mode(struct fnic *fnic) fnic->wq_copy_count >= 1 && fnic->cq_count >= 3 && fnic->intr_count >= 1 && - pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) < 0) { + pci_alloc_irq_vectors(fnic->pdev, 1, 1, PCI_IRQ_MSI) == 1) { fnic->rq_count = 1; fnic->raw_wq_count = 1; fnic->wq_copy_count = 1; diff --git a/drivers/scsi/fnic/fnic_scsi.c 
b/drivers/scsi/fnic/fnic_scsi.c index 80608b53897b..b60795893994 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + return SCSI_MLQUEUE_HOST_BUSY; + rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, @@ -1024,7 +1027,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, atomic64_inc(&fnic_stats->io_stats.io_completions); - io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time); + io_duration_time = jiffies_to_msecs(jiffies) - + jiffies_to_msecs(start_time); if(io_duration_time <= 10) atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec); diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c index 9621831e17ba..a0d01aea28f7 100644 --- a/drivers/scsi/fnic/fnic_trace.c +++ b/drivers/scsi/fnic/fnic_trace.c @@ -453,7 +453,7 @@ int fnic_get_stats_data(struct stats_debug_info *debug, (u64)atomic64_read(&stats->misc_stats.frame_errors)); len += snprintf(debug->debug_buffer + len, buf_size - len, - "Firmware reported port seed: %llu\n", + "Firmware reported port speed: %llu\n", (u64)atomic64_read( &stats->misc_stats.current_port_speed)); diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index 78af9cc2009b..1b88a3b53eee 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -259,7 +259,7 @@ int vnic_dev_cmd1(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd, int wait) struct vnic_devcmd __iomem *devcmd = vdev->devcmd; int delay; u32 status; - int dev_cmd_err[] = { + static const int dev_cmd_err[] = { /* convert from fw's version of error.h to host's version */ 0, /* ERR_SUCCESS */ EINVAL, /* ERR_EINVAL */ @@ -688,26 +688,26 @@ int vnic_dev_soft_reset_done(struct vnic_dev *vdev, int *done) int vnic_dev_hang_notify(struct vnic_dev *vdev) { - u64 a0, a1; + u64 a0 = 0, a1 = 0; int wait = 1000; return vnic_dev_cmd(vdev, CMD_HANG_NOTIFY, &a0, &a1, wait); } int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr) { - u64 a0, a1; + u64 a[2] = {}; int wait = 1000; int err, i; for (i = 0; i < ETH_ALEN; i++) mac_addr[i] = 0; - err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_MAC_ADDR, &a[0], &a[1], wait); if (err) return err; for (i = 0; i < ETH_ALEN; i++) - mac_addr[i] = ((u8 *)&a0)[i]; + mac_addr[i] = ((u8 *)&a)[i]; return 0; } @@ -732,30 +732,30 @@ void vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast, void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_ADD, &a[0], &a[1], wait); if (err) pr_err("Can't add addr [%pM], %d\n", addr, err); } void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr) { - u64 a0 = 0, a1 = 0; + u64 a[2] = {}; int wait = 1000; int err; int i; for (i = 0; i < ETH_ALEN; i++) - ((u8 *)&a0)[i] = addr[i]; + ((u8 *)&a)[i] = addr[i]; - err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a0, &a1, wait); + err = vnic_dev_cmd(vdev, CMD_ADDR_DEL, &a[0], &a[1], wait); if (err) pr_err("Can't del addr [%pM], %d\n", addr, err); 
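	/*
	 * Editor's note, an inference from the hunks above rather than
	 * patch text: the u64 a[2] = {} form zero-fills both mailbox
	 * argument words before vnic_dev_cmd() hands them to firmware,
	 * just as the explicit a0 = 0, a1 = 0 now does in
	 * vnic_dev_hang_notify(); the old declarations passed
	 * uninitialized stack data whenever fewer than 16 payload
	 * bytes were filled in.
	 */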
} diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 42a02cc47a60..2bdd64648ef0 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -21,6 +21,7 @@ #include <linux/platform_device.h> #include <linux/property.h> #include <linux/regmap.h> +#include <linux/timer.h> #include <scsi/sas_ata.h> #include <scsi/libsas.h> @@ -31,7 +32,13 @@ #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES #define HISI_SAS_RESET_BIT 0 #define HISI_SAS_REJECT_CMD_BIT 1 -#define HISI_SAS_RESERVED_IPTT_CNT 96 +#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS) +#define HISI_SAS_RESERVED_IPTT 96 +#define HISI_SAS_UNRESERVED_IPTT \ + (HISI_SAS_MAX_COMMANDS - HISI_SAS_RESERVED_IPTT) + +#define HISI_SAS_IOST_ITCT_CACHE_NUM 64 +#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10 #define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer)) #define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table)) @@ -78,6 +85,7 @@ #define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK) #define HISI_SAS_WAIT_PHYUP_TIMEOUT 20 +#define CLEAR_ITCT_TIMEOUT 20 struct hisi_hba; @@ -128,7 +136,6 @@ struct hisi_sas_rst { #define HISI_SAS_DECLARE_RST_WORK_ON_STACK(r) \ DECLARE_COMPLETION_ONSTACK(c); \ - DECLARE_WORK(w, hisi_sas_sync_rst_work_handler); \ struct hisi_sas_rst r = HISI_SAS_RST_WORK_INIT(r, c) enum hisi_sas_bit_err_type { @@ -162,6 +169,7 @@ struct hisi_sas_phy { enum sas_linkrate minimum_linkrate; enum sas_linkrate maximum_linkrate; int enable; + atomic_t down_cnt; }; struct hisi_sas_port { @@ -172,10 +180,10 @@ struct hisi_sas_port { struct hisi_sas_cq { struct hisi_hba *hisi_hba; - const struct cpumask *pci_irq_mask; - struct tasklet_struct tasklet; + const struct cpumask *irq_mask; int rd_point; int id; + int irq_no; }; struct hisi_sas_dq { @@ -249,6 +257,22 @@ struct hisi_sas_debugfs_reg { }; }; +struct hisi_sas_iost_itct_cache { + u32 data[HISI_SAS_IOST_ITCT_CACHE_DW_SZ]; +}; + +enum hisi_sas_debugfs_reg_array_member { + DEBUGFS_GLOBAL = 0, + DEBUGFS_AXI, + DEBUGFS_RAS, + DEBUGFS_REGS_NUM +}; + +enum hisi_sas_debugfs_cache_type { + HISI_SAS_ITCT_CACHE, + HISI_SAS_IOST_CACHE, +}; + struct hisi_sas_hw { int (*hw_init)(struct hisi_hba *hisi_hba); void (*setup_itct)(struct hisi_hba *hisi_hba, @@ -257,7 +281,6 @@ struct hisi_sas_hw { struct domain_device *device); struct hisi_sas_device *(*alloc_dev)(struct domain_device *device); void (*sl_notify_ssp)(struct hisi_hba *hisi_hba, int phy_no); - int (*get_free_slot)(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq); void (*start_delivery)(struct hisi_sas_dq *dq); void (*prep_ssp)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot); @@ -268,8 +291,6 @@ struct hisi_sas_hw { void (*prep_abort)(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, int device_id, int abort_flag, int tag_to_abort); - int (*slot_complete)(struct hisi_hba *hisi_hba, - struct hisi_sas_slot *slot); void (*phys_init)(struct hisi_hba *hisi_hba); void (*phy_start)(struct hisi_hba *hisi_hba, int phy_no); void (*phy_disable)(struct hisi_hba *hisi_hba, int phy_no); @@ -278,8 +299,8 @@ struct hisi_sas_hw { void (*phy_set_linkrate)(struct hisi_hba *hisi_hba, int phy_no, struct sas_phy_linkrates *linkrates); enum sas_linkrate (*phy_get_max_linkrate)(void); - void (*clear_itct)(struct hisi_hba *hisi_hba, - struct hisi_sas_device *dev); + int (*clear_itct)(struct hisi_hba *hisi_hba, + struct hisi_sas_device *dev); void (*free_device)(struct hisi_sas_device *sas_dev); int (*get_wideport_bitmap)(struct hisi_hba 
*hisi_hba, int port_id); void (*dereg_device)(struct hisi_hba *hisi_hba, @@ -288,18 +309,59 @@ struct hisi_sas_hw { u32 (*get_phys_state)(struct hisi_hba *hisi_hba); int (*write_gpio)(struct hisi_hba *hisi_hba, u8 reg_type, u8 reg_index, u8 reg_count, u8 *write_data); - int (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba, - int delay_ms, int timeout_ms); + void (*wait_cmds_complete_timeout)(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms); void (*snapshot_prepare)(struct hisi_hba *hisi_hba); void (*snapshot_restore)(struct hisi_hba *hisi_hba); - int max_command_entries; + int (*set_bist)(struct hisi_hba *hisi_hba, bool enable); + void (*read_iost_itct_cache)(struct hisi_hba *hisi_hba, + enum hisi_sas_debugfs_cache_type type, + u32 *cache); int complete_hdr_size; struct scsi_host_template *sht; - const struct hisi_sas_debugfs_reg *debugfs_reg_global; + const struct hisi_sas_debugfs_reg *debugfs_reg_array[DEBUGFS_REGS_NUM]; const struct hisi_sas_debugfs_reg *debugfs_reg_port; }; +#define HISI_SAS_MAX_DEBUGFS_DUMP (50) + +struct hisi_sas_debugfs_cq { + struct hisi_sas_cq *cq; + void *complete_hdr; +}; + +struct hisi_sas_debugfs_dq { + struct hisi_sas_dq *dq; + struct hisi_sas_cmd_hdr *hdr; +}; + +struct hisi_sas_debugfs_regs { + struct hisi_hba *hisi_hba; + u32 *data; +}; + +struct hisi_sas_debugfs_port { + struct hisi_sas_phy *phy; + u32 *data; +}; + +struct hisi_sas_debugfs_iost { + struct hisi_sas_iost *iost; +}; + +struct hisi_sas_debugfs_itct { + struct hisi_sas_itct *itct; +}; + +struct hisi_sas_debugfs_iost_cache { + struct hisi_sas_iost_itct_cache *cache; +}; + +struct hisi_sas_debugfs_itct_cache { + struct hisi_sas_iost_itct_cache *cache; +}; + struct hisi_hba { /* This must be the first element, used by SHOST_TO_SAS_HA */ struct sas_ha_struct *p; @@ -371,17 +433,30 @@ struct hisi_hba { int cq_nvecs; unsigned int *reply_map; - /* debugfs memories */ - u32 *debugfs_global_reg; - u32 *debugfs_port_reg[HISI_SAS_MAX_PHYS]; - void *debugfs_complete_hdr[HISI_SAS_MAX_QUEUES]; - struct hisi_sas_cmd_hdr *debugfs_cmd_hdr[HISI_SAS_MAX_QUEUES]; - struct hisi_sas_iost *debugfs_iost; - struct hisi_sas_itct *debugfs_itct; + /* bist */ + enum sas_linkrate debugfs_bist_linkrate; + int debugfs_bist_code_mode; + int debugfs_bist_phy_no; + int debugfs_bist_mode; + u32 debugfs_bist_cnt; + int debugfs_bist_enable; + /* debugfs memories */ + /* Put Global AXI and RAS Register into register array */ + struct hisi_sas_debugfs_regs debugfs_regs[HISI_SAS_MAX_DEBUGFS_DUMP][DEBUGFS_REGS_NUM]; + struct hisi_sas_debugfs_port debugfs_port_reg[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_PHYS]; + struct hisi_sas_debugfs_cq debugfs_cq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES]; + struct hisi_sas_debugfs_dq debugfs_dq[HISI_SAS_MAX_DEBUGFS_DUMP][HISI_SAS_MAX_QUEUES]; + struct hisi_sas_debugfs_iost debugfs_iost[HISI_SAS_MAX_DEBUGFS_DUMP]; + struct hisi_sas_debugfs_itct debugfs_itct[HISI_SAS_MAX_DEBUGFS_DUMP]; + struct hisi_sas_debugfs_iost_cache debugfs_iost_cache[HISI_SAS_MAX_DEBUGFS_DUMP]; + struct hisi_sas_debugfs_itct_cache debugfs_itct_cache[HISI_SAS_MAX_DEBUGFS_DUMP]; + + u64 debugfs_timestamp[HISI_SAS_MAX_DEBUGFS_DUMP]; + int debugfs_dump_index; struct dentry *debugfs_dir; struct dentry *debugfs_dump_dentry; - bool debugfs_snapshot; + struct dentry *debugfs_bist_dentry; }; /* Generic HW DMA host memory structures */ @@ -523,6 +598,7 @@ struct hisi_sas_slot_dif_buf_table { extern struct scsi_transport_template *hisi_sas_stt; extern bool hisi_sas_debugfs_enable; +extern u32 
hisi_sas_debugfs_dump_count; extern struct dentry *hisi_sas_debugfs_dir; extern void hisi_sas_stop_phys(struct hisi_hba *hisi_hba); @@ -533,7 +609,6 @@ extern u8 hisi_sas_get_ata_protocol(struct host_to_dev_fis *fis, extern struct hisi_sas_port *to_hisi_sas_port(struct asd_sas_port *sas_port); extern void hisi_sas_sata_done(struct sas_task *task, struct hisi_sas_slot *slot); -extern int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag); extern int hisi_sas_get_fw_info(struct hisi_hba *hisi_hba); extern int hisi_sas_probe(struct platform_device *pdev, const struct hisi_sas_hw *ops); @@ -552,7 +627,7 @@ extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, extern void hisi_sas_init_mem(struct hisi_hba *hisi_hba); extern void hisi_sas_rst_work_handler(struct work_struct *work); extern void hisi_sas_sync_rst_work_handler(struct work_struct *work); -extern void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba); +extern void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba); extern void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no); extern bool hisi_sas_notify_phy_event(struct hisi_sas_phy *phy, enum hisi_sas_phy_event event); diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index cb746cfc2fa8..9a6deb21fe4d 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -118,21 +118,6 @@ void hisi_sas_sata_done(struct sas_task *task, } EXPORT_SYMBOL_GPL(hisi_sas_sata_done); -int hisi_sas_get_ncq_tag(struct sas_task *task, u32 *tag) -{ - struct ata_queued_cmd *qc = task->uldd_task; - - if (qc) { - if (qc->tf.command == ATA_CMD_FPDMA_WRITE || - qc->tf.command == ATA_CMD_FPDMA_READ) { - *tag = qc->tag; - return 1; - } - } - return 0; -} -EXPORT_SYMBOL_GPL(hisi_sas_get_ncq_tag); - /* * This function assumes linkrate mask fits in 8 bits, which it * does for all HW versions supported. 
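[Editor's sketch] The slot-index hunks below drop the irqsave lock
variants and wrap the tag search at HISI_SAS_UNRESERVED_IPTT. Stripped
of driver state, the allocator they implement looks roughly like this
(simplified names, illustrative only; tags below reserved_start come
from blk-mq, and find_next_zero_bit()/set_bit() are the usual
<linux/bitops.h> helpers):

	static int reserved_tag_alloc(unsigned long *bitmap, int count,
				      int reserved_start, int *last)
	{
		/* resume scanning just after the last tag handed out */
		int index = find_next_zero_bit(bitmap, count, *last + 1);

		if (index >= count) {
			/* wrap once to the start of the reserved region */
			index = find_next_zero_bit(bitmap, count,
						   reserved_start);
			if (index >= count)
				return -1;	/* every reserved tag busy */
		}
		set_bit(index, bitmap);
		*last = index;
		return index;
	}

Locking stays with the caller (hisi_hba->lock); the switch to plain
spin_lock() in these hunks assumes the allocation paths never run from
hard interrupt context.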
@@ -178,13 +163,11 @@ static void hisi_sas_slot_index_clear(struct hisi_hba *hisi_hba, int slot_idx) static void hisi_sas_slot_index_free(struct hisi_hba *hisi_hba, int slot_idx) { - unsigned long flags; - - if (hisi_hba->hw->slot_index_alloc || (slot_idx >= - hisi_hba->hw->max_command_entries - HISI_SAS_RESERVED_IPTT_CNT)) { - spin_lock_irqsave(&hisi_hba->lock, flags); + if (hisi_hba->hw->slot_index_alloc || + slot_idx >= HISI_SAS_UNRESERVED_IPTT) { + spin_lock(&hisi_hba->lock); hisi_sas_slot_index_clear(hisi_hba, slot_idx); - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); } } @@ -200,27 +183,25 @@ static int hisi_sas_slot_index_alloc(struct hisi_hba *hisi_hba, { int index; void *bitmap = hisi_hba->slot_index_tags; - unsigned long flags; if (scsi_cmnd) return scsi_cmnd->request->tag; - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock(&hisi_hba->lock); index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, hisi_hba->last_slot_index + 1); if (index >= hisi_hba->slot_index_count) { index = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, - hisi_hba->hw->max_command_entries - - HISI_SAS_RESERVED_IPTT_CNT); + HISI_SAS_UNRESERVED_IPTT); if (index >= hisi_hba->slot_index_count) { - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return -SAS_QUEUE_FULL; } } hisi_sas_slot_index_set(hisi_hba, index); hisi_hba->last_slot_index = index; - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return index; } @@ -236,7 +217,6 @@ static void hisi_sas_slot_index_init(struct hisi_hba *hisi_hba) void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, struct hisi_sas_slot *slot) { - unsigned long flags; int device_id = slot->device_id; struct hisi_sas_device *sas_dev = &hisi_hba->devices[device_id]; @@ -263,9 +243,9 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task, } } - spin_lock_irqsave(&sas_dev->lock, flags); + spin_lock(&sas_dev->lock); list_del_init(&slot->entry); - spin_unlock_irqrestore(&sas_dev->lock, flags); + spin_unlock(&sas_dev->lock); memset(slot, 0, offsetof(struct hisi_sas_slot, buf)); @@ -301,7 +281,7 @@ static void hisi_sas_task_prep_abort(struct hisi_hba *hisi_hba, static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, struct sas_task *task, int n_elem, - int n_elem_req, int n_elem_resp) + int n_elem_req) { struct device *dev = hisi_hba->dev; @@ -315,16 +295,13 @@ static void hisi_sas_dma_unmap(struct hisi_hba *hisi_hba, if (n_elem_req) dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); - if (n_elem_resp) - dma_unmap_sg(dev, &task->smp_task.smp_resp, - 1, DMA_FROM_DEVICE); } } } static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, struct sas_task *task, int *n_elem, - int *n_elem_req, int *n_elem_resp) + int *n_elem_req) { struct device *dev = hisi_hba->dev; int rc; @@ -332,7 +309,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, if (sas_protocol_ata(task->task_proto)) { *n_elem = task->num_scatter; } else { - unsigned int req_len, resp_len; + unsigned int req_len; if (task->num_scatter) { *n_elem = dma_map_sg(dev, task->scatter, @@ -353,17 +330,6 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, rc = -EINVAL; goto err_out_dma_unmap; } - *n_elem_resp = dma_map_sg(dev, &task->smp_task.smp_resp, - 1, DMA_FROM_DEVICE); - if (!*n_elem_resp) { - rc = -ENOMEM; - goto err_out_dma_unmap; - } - resp_len = sg_dma_len(&task->smp_task.smp_resp); - if (resp_len & 0x3) { - rc = -EINVAL; - goto 
err_out_dma_unmap; - } } } @@ -378,7 +344,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba, err_out_dma_unmap: /* It would be better to call dma_unmap_sg() here, but it's messy */ hisi_sas_dma_unmap(hisi_hba, task, *n_elem, - *n_elem_req, *n_elem_resp); + *n_elem_req); prep_out: return rc; } @@ -450,7 +416,7 @@ static int hisi_sas_task_prep(struct sas_task *task, struct asd_sas_port *sas_port = device->port; struct device *dev = hisi_hba->dev; int dlvry_queue_slot, dlvry_queue, rc, slot_idx; - int n_elem = 0, n_elem_dif = 0, n_elem_req = 0, n_elem_resp = 0; + int n_elem = 0, n_elem_dif = 0, n_elem_req = 0; struct hisi_sas_dq *dq; unsigned long flags; int wr_q_index; @@ -486,7 +452,7 @@ static int hisi_sas_task_prep(struct sas_task *task, } rc = hisi_sas_dma_map(hisi_hba, task, &n_elem, - &n_elem_req, &n_elem_resp); + &n_elem_req); if (rc < 0) goto prep_out; @@ -519,19 +485,14 @@ static int hisi_sas_task_prep(struct sas_task *task, slot_idx = rc; slot = &hisi_hba->slot_info[slot_idx]; - spin_lock_irqsave(&dq->lock, flags); - wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); - if (wr_q_index < 0) { - spin_unlock_irqrestore(&dq->lock, flags); - rc = -EAGAIN; - goto err_out_tag; - } - + spin_lock(&dq->lock); + wr_q_index = dq->wr_point; + dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; list_add_tail(&slot->delivery, &dq->list); - spin_unlock_irqrestore(&dq->lock, flags); - spin_lock_irqsave(&sas_dev->lock, flags); + spin_unlock(&dq->lock); + spin_lock(&sas_dev->lock); list_add_tail(&slot->entry, &sas_dev->list); - spin_unlock_irqrestore(&sas_dev->lock, flags); + spin_unlock(&sas_dev->lock); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; @@ -551,7 +512,8 @@ static int hisi_sas_task_prep(struct sas_task *task, memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); - memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); + memset(hisi_sas_status_buf_addr_mem(slot), 0, + sizeof(struct hisi_sas_err_record)); switch (task->task_proto) { case SAS_PROTOCOL_SMP: @@ -580,14 +542,12 @@ static int hisi_sas_task_prep(struct sas_task *task, return 0; -err_out_tag: - hisi_sas_slot_index_free(hisi_hba, slot_idx); err_out_dif_dma_unmap: if (!sas_protocol_ata(task->task_proto)) hisi_sas_dif_dma_unmap(hisi_hba, task, n_elem_dif); err_out_dma_unmap: hisi_sas_dma_unmap(hisi_hba, task, n_elem, - n_elem_req, n_elem_resp); + n_elem_req); prep_out: dev_err(dev, "task prep: failed[%d]!\n", rc); return rc; @@ -598,7 +558,6 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, { u32 rc; u32 pass = 0; - unsigned long flags; struct hisi_hba *hisi_hba; struct device *dev; struct domain_device *device = task->dev; @@ -623,7 +582,13 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, dev = hisi_hba->dev; if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) { - if (in_softirq()) + /* + * For IOs from upper layer, it may already disable preempt + * in the IO path, if disable preempt again in down(), + * function schedule() will report schedule_bug(), so check + * preemptible() before goto down(). 
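+ * (Editor's addition: preemptible() expands to preempt_count() == 0
+ * && !irqs_disabled() when CONFIG_PREEMPT_COUNT is enabled, and is
+ * constant false otherwise, so it casts a strictly wider net than
+ * the in_softirq() test it replaces.)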
+ */ + if (!preemptible()) return -EINVAL; down(&hisi_hba->sem); @@ -636,9 +601,9 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, dev_err(dev, "task exec: failed[%d]!\n", rc); if (likely(pass)) { - spin_lock_irqsave(&dq->lock, flags); + spin_lock(&dq->lock); hisi_hba->hw->start_delivery(dq); - spin_unlock_irqrestore(&dq->lock, flags); + spin_unlock(&dq->lock); } return rc; @@ -689,12 +654,11 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) { struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct hisi_sas_device *sas_dev = NULL; - unsigned long flags; int last = hisi_hba->last_dev_id; int first = (hisi_hba->last_dev_id + 1) % HISI_SAS_MAX_DEVICES; int i; - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock(&hisi_hba->lock); for (i = first; i != last; i %= HISI_SAS_MAX_DEVICES) { if (hisi_hba->devices[i].dev_type == SAS_PHY_UNUSED) { int queue = i % hisi_hba->queue_count; @@ -714,18 +678,18 @@ static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device) i++; } hisi_hba->last_dev_id = i; - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return sas_dev; } -#define HISI_SAS_SRST_ATA_DISK_CNT 3 +#define HISI_SAS_DISK_RECOVER_CNT 3 static int hisi_sas_init_device(struct domain_device *device) { int rc = TMF_RESP_FUNC_COMPLETE; struct scsi_lun lun; struct hisi_sas_tmf_task tmf_task; - int retry = HISI_SAS_SRST_ATA_DISK_CNT; + int retry = HISI_SAS_DISK_RECOVER_CNT; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; struct sas_phy *local_phy; @@ -735,10 +699,14 @@ static int hisi_sas_init_device(struct domain_device *device) int_to_scsilun(0, &lun); tmf_task.tmf = TMF_CLEAR_TASK_SET; - rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, - &tmf_task); - if (rc == TMF_RESP_FUNC_COMPLETE) - hisi_sas_release_task(hisi_hba, device); + while (retry-- > 0) { + rc = hisi_sas_debug_issue_ssp_tmf(device, lun.scsi_lun, + &tmf_task); + if (rc == TMF_RESP_FUNC_COMPLETE) { + hisi_sas_release_task(hisi_hba, device); + break; + } + } break; case SAS_SATA_DEV: case SAS_SATA_PM: @@ -1000,12 +968,13 @@ static void hisi_sas_port_notify_formed(struct asd_sas_phy *sas_phy) struct hisi_hba *hisi_hba = sas_ha->lldd_ha; struct hisi_sas_phy *phy = sas_phy->lldd_phy; struct asd_sas_port *sas_port = sas_phy->port; - struct hisi_sas_port *port = to_hisi_sas_port(sas_port); + struct hisi_sas_port *port; unsigned long flags; if (!sas_port) return; + port = to_hisi_sas_port(sas_port); spin_lock_irqsave(&hisi_hba->lock, flags); port->port_attached = 1; port->id = phy->port_id; @@ -1077,25 +1046,30 @@ static void hisi_sas_dev_gone(struct domain_device *device) struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; + int ret = 0; dev_info(dev, "dev[%d:%x] is gone\n", sas_dev->device_id, sas_dev->dev_type); + down(&hisi_hba->sem); if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) { hisi_sas_internal_task_abort(hisi_hba, device, HISI_SAS_INT_ABT_DEV, 0); hisi_sas_dereg_device(hisi_hba, device); - down(&hisi_hba->sem); - hisi_hba->hw->clear_itct(hisi_hba, sas_dev); - up(&hisi_hba->sem); + ret = hisi_hba->hw->clear_itct(hisi_hba, sas_dev); device->lldd_dev = NULL; } if (hisi_hba->hw->free_device) hisi_hba->hw->free_device(sas_dev); - sas_dev->dev_type = SAS_PHY_UNUSED; + + /* Don't mark it as SAS_PHY_UNUSED if failed to clear ITCT */ + if (!ret) + sas_dev->dev_type = SAS_PHY_UNUSED; + 
sas_dev->sas_device = NULL; + up(&hisi_hba->sem); } static int hisi_sas_queue_command(struct sas_task *task, gfp_t gfp_flags) @@ -1253,10 +1227,10 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; /* - * flush tasklet to avoid free'ing task + * sync irq to avoid free'ing task * before using task in IO completion */ - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); slot->task = NULL; } @@ -1423,8 +1397,7 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba) } } -static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, - u32 state) +static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state) { struct sas_ha_struct *sas_ha = &hisi_hba->sha; struct asd_sas_port *_sas_port = NULL; @@ -1434,7 +1407,7 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 old_state, struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct asd_sas_port *sas_port = sas_phy->port; - bool do_port_check = !!(_sas_port != sas_port); + bool do_port_check = _sas_port != sas_port; if (!sas_phy->phy->enabled) continue; @@ -1576,16 +1549,16 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba) msleep(1000); hisi_sas_refresh_port_id(hisi_hba); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); - up(&hisi_hba->sem); if (hisi_hba->reject_stp_links_msk) hisi_sas_terminate_stp_reject(hisi_hba); hisi_sas_reset_init_all_devices(hisi_hba); + up(&hisi_hba->sem); scsi_unblock_requests(shost); clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); state = hisi_hba->hw->get_phys_state(hisi_hba); - hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state, state); + hisi_sas_rescan_topology(hisi_hba, state); } EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done); @@ -1595,7 +1568,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba) struct Scsi_Host *shost = hisi_hba->shost; int rc; - if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct) + if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); if (!hisi_hba->hw->soft_reset) @@ -1647,11 +1620,11 @@ static int hisi_sas_abort_task(struct sas_task *task) if (slot) { /* - * flush tasklet to avoid free'ing task + * sync irq to avoid free'ing task * before using task in IO completion */ cq = &hisi_hba->cq[slot->dlvry_queue]; - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); } spin_unlock_irqrestore(&task->task_state_lock, flags); rc = TMF_RESP_FUNC_COMPLETE; @@ -1715,10 +1688,10 @@ static int hisi_sas_abort_task(struct sas_task *task) if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) && task->lldd_task) { /* - * flush tasklet to avoid free'ing task + * sync irq to avoid free'ing task * before using task in IO completion */ - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); slot->task = NULL; } } @@ -1770,24 +1743,34 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) struct hisi_sas_device *sas_dev = device->lldd_dev; struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct sas_ha_struct *sas_ha = &hisi_hba->sha; - struct asd_sas_phy *sas_phy = sas_ha->sas_phy[local_phy->number]; - struct hisi_sas_phy *phy = container_of(sas_phy, - struct hisi_sas_phy, sas_phy); DECLARE_COMPLETION_ONSTACK(phyreset); int rc, reset_type; + if (!local_phy->enabled) { + sas_put_local_phy(local_phy); + return -ENODEV; + } + if (scsi_is_sas_phy_local(local_phy)) { + struct asd_sas_phy 
*sas_phy = + sas_ha->sas_phy[local_phy->number]; + struct hisi_sas_phy *phy = + container_of(sas_phy, struct hisi_sas_phy, sas_phy); phy->in_reset = 1; phy->reset_completion = &phyreset; } reset_type = (sas_dev->dev_status == HISI_SAS_DEV_INIT || - !dev_is_sata(device)) ? 1 : 0; + !dev_is_sata(device)) ? true : false; rc = sas_phy_reset(local_phy, reset_type); sas_put_local_phy(local_phy); if (scsi_is_sas_phy_local(local_phy)) { + struct asd_sas_phy *sas_phy = + sas_ha->sas_phy[local_phy->number]; + struct hisi_sas_phy *phy = + container_of(sas_phy, struct hisi_sas_phy, sas_phy); int ret = wait_for_completion_timeout(&phyreset, 2 * HZ); unsigned long flags; @@ -1802,9 +1785,10 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device) } else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) { /* * If in init state, we rely on caller to wait for link to be - * ready; otherwise, delay. + * ready; otherwise, except phy reset is fail, delay. */ - msleep(2000); + if (!rc) + msleep(2000); } return rc; @@ -1845,21 +1829,21 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) struct device *dev = hisi_hba->dev; int rc = TMF_RESP_FUNC_FAILED; + /* Clear internal IO and then lu reset */ + rc = hisi_sas_internal_task_abort(hisi_hba, device, + HISI_SAS_INT_ABT_DEV, 0); + if (rc < 0) { + dev_err(dev, "lu_reset: internal abort failed\n"); + goto out; + } + hisi_sas_dereg_device(hisi_hba, device); + if (dev_is_sata(device)) { struct sas_phy *phy; - /* Clear internal IO and then hardreset */ - rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); - if (rc < 0) { - dev_err(dev, "lu_reset: internal abort failed\n"); - goto out; - } - hisi_sas_dereg_device(hisi_hba, device); - phy = sas_get_local_phy(device); - rc = sas_phy_reset(phy, 1); + rc = sas_phy_reset(phy, true); if (rc == 0) hisi_sas_release_task(hisi_hba, device); @@ -1867,14 +1851,6 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun) } else { struct hisi_sas_tmf_task tmf_task = { .tmf = TMF_LU_RESET }; - rc = hisi_sas_internal_task_abort(hisi_hba, device, - HISI_SAS_INT_ABT_DEV, 0); - if (rc < 0) { - dev_err(dev, "lu_reset: internal abort failed\n"); - goto out; - } - hisi_sas_dereg_device(hisi_hba, device); - rc = hisi_sas_debug_issue_ssp_tmf(device, lun, &tmf_task); if (rc == TMF_RESP_FUNC_COMPLETE) hisi_sas_release_task(hisi_hba, device); @@ -1964,7 +1940,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, struct asd_sas_port *sas_port = device->port; struct hisi_sas_cmd_hdr *cmd_hdr_base; int dlvry_queue_slot, dlvry_queue, n_elem = 0, rc, slot_idx; - unsigned long flags, flags_dq = 0; + unsigned long flags; int wr_q_index; if (unlikely(test_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags))) @@ -1983,18 +1959,14 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, slot_idx = rc; slot = &hisi_hba->slot_info[slot_idx]; - spin_lock_irqsave(&dq->lock, flags_dq); - wr_q_index = hisi_hba->hw->get_free_slot(hisi_hba, dq); - if (wr_q_index < 0) { - spin_unlock_irqrestore(&dq->lock, flags_dq); - rc = -EAGAIN; - goto err_out_tag; - } + spin_lock(&dq->lock); + wr_q_index = dq->wr_point; + dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; list_add_tail(&slot->delivery, &dq->list); - spin_unlock_irqrestore(&dq->lock, flags_dq); - spin_lock_irqsave(&sas_dev->lock, flags); + spin_unlock(&dq->lock); + spin_lock(&sas_dev->lock); list_add_tail(&slot->entry, &sas_dev->list); - spin_unlock_irqrestore(&sas_dev->lock, flags); + 
spin_unlock(&sas_dev->lock); dlvry_queue = dq->id; dlvry_queue_slot = wr_q_index; @@ -2012,7 +1984,8 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); memset(hisi_sas_cmd_hdr_addr_mem(slot), 0, HISI_SAS_COMMAND_TABLE_SZ); - memset(hisi_sas_status_buf_addr_mem(slot), 0, HISI_SAS_STATUS_BUF_SZ); + memset(hisi_sas_status_buf_addr_mem(slot), 0, + sizeof(struct hisi_sas_err_record)); hisi_sas_task_prep_abort(hisi_hba, slot, device_id, abort_flag, task_tag); @@ -2022,14 +1995,12 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, spin_unlock_irqrestore(&task->task_state_lock, flags); WRITE_ONCE(slot->ready, 1); /* send abort command to the chip */ - spin_lock_irqsave(&dq->lock, flags); + spin_lock(&dq->lock); hisi_hba->hw->start_delivery(dq); - spin_unlock_irqrestore(&dq->lock, flags); + spin_unlock(&dq->lock); return 0; -err_out_tag: - hisi_sas_slot_index_free(hisi_hba, slot_idx); err_out: dev_err(dev, "internal abort task prep: failed[%d]!\n", rc); @@ -2089,6 +2060,9 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, /* Internal abort timed out */ if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) { + if (hisi_sas_debugfs_enable && hisi_hba->debugfs_itct[0].itct) + queue_work(hisi_hba->wq, &hisi_hba->debugfs_work); + if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) { struct hisi_sas_slot *slot = task->lldd_task; @@ -2096,10 +2070,10 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue]; /* - * flush tasklet to avoid free'ing task + * sync irq to avoid free'ing task * before using task in IO completion */ - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); slot->task = NULL; } dev_err(dev, "internal task abort: timeout and not done.\n"); @@ -2123,7 +2097,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, } exit: - dev_dbg(dev, "internal task abort: task to dev %016llx task=%p resp: 0x%x sts 0x%x\n", + dev_dbg(dev, "internal task abort: task to dev %016llx task=%pK resp: 0x%x sts 0x%x\n", SAS_ADDR(device->sas_addr), task, task->task_status.resp, /* 0 is complete, -1 is undelivered */ task->task_status.stat); @@ -2151,7 +2125,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, case HISI_SAS_INT_ABT_DEV: for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - const struct cpumask *mask = cq->pci_irq_mask; + const struct cpumask *mask = cq->irq_mask; if (mask && !cpumask_intersects(cpu_online_mask, mask)) continue; @@ -2245,17 +2219,17 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy) } EXPORT_SYMBOL_GPL(hisi_sas_phy_down); -void hisi_sas_kill_tasklets(struct hisi_hba *hisi_hba) +void hisi_sas_sync_irqs(struct hisi_hba *hisi_hba) { int i; for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - tasklet_kill(&cq->tasklet); + synchronize_irq(cq->irq_no); } } -EXPORT_SYMBOL_GPL(hisi_sas_kill_tasklets); +EXPORT_SYMBOL_GPL(hisi_sas_sync_irqs); int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type) { @@ -2291,7 +2265,7 @@ static struct sas_domain_function_template hisi_sas_transport_ops = { void hisi_sas_init_mem(struct hisi_hba *hisi_hba) { - int i, s, j, max_command_entries = hisi_hba->hw->max_command_entries; + int i, s, j, max_command_entries = HISI_SAS_MAX_COMMANDS; struct hisi_sas_breakpoint *sata_breakpoint = hisi_hba->sata_breakpoint; for (i = 0; i < hisi_hba->queue_count; i++) { 
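[Editor's note] The allocation hunks just below also drop __GFP_ZERO:
since v5.0 the DMA API zeroes coherent allocations itself, so a plain
(sketch, not patch text)

	hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma,
					     GFP_KERNEL);
	if (!hisi_hba->itct)
		goto err_out;
	/* the buffer arrives zero-filled; no memset() is required */

behaves exactly as the old GFP_KERNEL | __GFP_ZERO call did.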
@@ -2328,7 +2302,7 @@ EXPORT_SYMBOL_GPL(hisi_sas_init_mem); int hisi_sas_alloc(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; - int i, j, s, max_command_entries = hisi_hba->hw->max_command_entries; + int i, j, s, max_command_entries = HISI_SAS_MAX_COMMANDS; int max_command_entries_ru, sz_slot_buf_ru; int blk_cnt, slots_per_blk; @@ -2379,7 +2353,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) s = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); hisi_hba->itct = dmam_alloc_coherent(dev, s, &hisi_hba->itct_dma, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL); if (!hisi_hba->itct) goto err_out; @@ -2396,7 +2370,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) else sz_slot_buf_ru = sizeof(struct hisi_sas_slot_buf_table); sz_slot_buf_ru = roundup(sz_slot_buf_ru, 64); - s = lcm(max_command_entries_ru, sz_slot_buf_ru); + s = max(lcm(max_command_entries_ru, sz_slot_buf_ru), PAGE_SIZE); blk_cnt = (max_command_entries_ru * sz_slot_buf_ru) / s; slots_per_blk = s / sz_slot_buf_ru; @@ -2406,7 +2380,7 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) void *buf; buf = dmam_alloc_coherent(dev, s, &buf_dma, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL); if (!buf) goto err_out; @@ -2455,11 +2429,9 @@ int hisi_sas_alloc(struct hisi_hba *hisi_hba) GFP_KERNEL); if (!hisi_hba->sata_breakpoint) goto err_out; - hisi_sas_init_mem(hisi_hba); hisi_sas_slot_index_init(hisi_hba); - hisi_hba->last_slot_index = hisi_hba->hw->max_command_entries - - HISI_SAS_RESERVED_IPTT_CNT; + hisi_hba->last_slot_index = HISI_SAS_UNRESERVED_IPTT; hisi_hba->wq = create_singlethread_workqueue(dev_name(dev)); if (!hisi_hba->wq) { @@ -2610,8 +2582,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, goto err_out; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - hisi_hba->regs = devm_ioremap_resource(dev, res); + hisi_hba->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hisi_hba->regs)) goto err_out; @@ -2672,13 +2643,11 @@ int hisi_sas_probe(struct platform_device *pdev, shost->max_channel = 1; shost->max_cmd_len = 16; if (hisi_hba->hw->slot_index_alloc) { - shost->can_queue = hisi_hba->hw->max_command_entries; - shost->cmd_per_lun = hisi_hba->hw->max_command_entries; + shost->can_queue = HISI_SAS_MAX_COMMANDS; + shost->cmd_per_lun = HISI_SAS_MAX_COMMANDS; } else { - shost->can_queue = hisi_hba->hw->max_command_entries - - HISI_SAS_RESERVED_IPTT_CNT; - shost->cmd_per_lun = hisi_hba->hw->max_command_entries - - HISI_SAS_RESERVED_IPTT_CNT; + shost->can_queue = HISI_SAS_UNRESERVED_IPTT; + shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; } sha->sas_ha_name = DRV_NAME; @@ -2712,6 +2681,7 @@ int hisi_sas_probe(struct platform_device *pdev, err_out_register_ha: scsi_remove_host(shost); err_out_ha: + hisi_sas_debugfs_exit(hisi_hba); hisi_sas_free(hisi_hba); scsi_host_put(shost); return rc; @@ -2723,10 +2693,11 @@ struct dentry *hisi_sas_debugfs_dir; static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba) { int queue_entry_size = hisi_hba->hw->complete_hdr_size; + int dump_index = hisi_hba->debugfs_dump_index; int i; for (i = 0; i < hisi_hba->queue_count; i++) - memcpy(hisi_hba->debugfs_complete_hdr[i], + memcpy(hisi_hba->debugfs_cq[dump_index][i].complete_hdr, hisi_hba->complete_hdr[i], HISI_SAS_QUEUE_SLOTS * queue_entry_size); } @@ -2734,13 +2705,14 @@ static void hisi_sas_debugfs_snapshot_cq_reg(struct hisi_hba *hisi_hba) static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba) { int queue_entry_size = sizeof(struct hisi_sas_cmd_hdr); + int 
dump_index = hisi_hba->debugfs_dump_index; int i; for (i = 0; i < hisi_hba->queue_count; i++) { - struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr; + struct hisi_sas_cmd_hdr *debugfs_cmd_hdr, *cmd_hdr; int j; - debugfs_cmd_hdr = hisi_hba->debugfs_cmd_hdr[i]; + debugfs_cmd_hdr = hisi_hba->debugfs_dq[dump_index][i].hdr; cmd_hdr = hisi_hba->cmd_hdr[i]; for (j = 0; j < HISI_SAS_QUEUE_SLOTS; j++) @@ -2751,6 +2723,7 @@ static void hisi_sas_debugfs_snapshot_dq_reg(struct hisi_hba *hisi_hba) static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba) { + int dump_index = hisi_hba->debugfs_dump_index; const struct hisi_sas_debugfs_reg *port = hisi_hba->hw->debugfs_reg_port; int i, phy_cnt; @@ -2758,7 +2731,7 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba) u32 *databuf; for (phy_cnt = 0; phy_cnt < hisi_hba->n_phy; phy_cnt++) { - databuf = (u32 *)hisi_hba->debugfs_port_reg[phy_cnt]; + databuf = hisi_hba->debugfs_port_reg[dump_index][phy_cnt].data; for (i = 0; i < port->count; i++, databuf++) { offset = port->base_off + 4 * i; *databuf = port->read_port_reg(hisi_hba, phy_cnt, @@ -2769,21 +2742,56 @@ static void hisi_sas_debugfs_snapshot_port_reg(struct hisi_hba *hisi_hba) static void hisi_sas_debugfs_snapshot_global_reg(struct hisi_hba *hisi_hba) { - u32 *databuf = (u32 *)hisi_hba->debugfs_global_reg; + int dump_index = hisi_hba->debugfs_dump_index; + u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL].data; + const struct hisi_sas_hw *hw = hisi_hba->hw; const struct hisi_sas_debugfs_reg *global = - hisi_hba->hw->debugfs_reg_global; + hw->debugfs_reg_array[DEBUGFS_GLOBAL]; int i; for (i = 0; i < global->count; i++, databuf++) *databuf = global->read_global_reg(hisi_hba, 4 * i); } +static void hisi_sas_debugfs_snapshot_axi_reg(struct hisi_hba *hisi_hba) +{ + int dump_index = hisi_hba->debugfs_dump_index; + u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI].data; + const struct hisi_sas_hw *hw = hisi_hba->hw; + const struct hisi_sas_debugfs_reg *axi = + hw->debugfs_reg_array[DEBUGFS_AXI]; + int i; + + for (i = 0; i < axi->count; i++, databuf++) + *databuf = axi->read_global_reg(hisi_hba, + 4 * i + axi->base_off); +} + +static void hisi_sas_debugfs_snapshot_ras_reg(struct hisi_hba *hisi_hba) +{ + int dump_index = hisi_hba->debugfs_dump_index; + u32 *databuf = hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS].data; + const struct hisi_sas_hw *hw = hisi_hba->hw; + const struct hisi_sas_debugfs_reg *ras = + hw->debugfs_reg_array[DEBUGFS_RAS]; + int i; + + for (i = 0; i < ras->count; i++, databuf++) + *databuf = ras->read_global_reg(hisi_hba, + 4 * i + ras->base_off); +} + static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba) { - void *databuf = hisi_hba->debugfs_itct; + int dump_index = hisi_hba->debugfs_dump_index; + void *cachebuf = hisi_hba->debugfs_itct_cache[dump_index].cache; + void *databuf = hisi_hba->debugfs_itct[dump_index].itct; struct hisi_sas_itct *itct; int i; + hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_ITCT_CACHE, + cachebuf); + itct = hisi_hba->itct; for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { @@ -2794,11 +2802,16 @@ static void hisi_sas_debugfs_snapshot_itct_reg(struct hisi_hba *hisi_hba) static void hisi_sas_debugfs_snapshot_iost_reg(struct hisi_hba *hisi_hba) { - int max_command_entries = hisi_hba->hw->max_command_entries; - void *databuf = hisi_hba->debugfs_iost; + int dump_index = hisi_hba->debugfs_dump_index; + int max_command_entries = HISI_SAS_MAX_COMMANDS; + void 
*cachebuf = hisi_hba->debugfs_iost_cache[dump_index].cache; + void *databuf = hisi_hba->debugfs_iost[dump_index].iost; struct hisi_sas_iost *iost; int i; + hisi_hba->hw->read_iost_itct_cache(hisi_hba, HISI_SAS_IOST_CACHE, + cachebuf); + iost = hisi_hba->iost; for (i = 0; i < max_command_entries; i++, iost++) { @@ -2843,11 +2856,12 @@ static void hisi_sas_debugfs_print_reg(u32 *regs_val, const void *ptr, static int hisi_sas_debugfs_global_show(struct seq_file *s, void *p) { - struct hisi_hba *hisi_hba = s->private; + struct hisi_sas_debugfs_regs *global = s->private; + struct hisi_hba *hisi_hba = global->hisi_hba; const struct hisi_sas_hw *hw = hisi_hba->hw; - const struct hisi_sas_debugfs_reg *reg_global = hw->debugfs_reg_global; + const void *reg_global = hw->debugfs_reg_array[DEBUGFS_GLOBAL]; - hisi_sas_debugfs_print_reg(hisi_hba->debugfs_global_reg, + hisi_sas_debugfs_print_reg(global->data, reg_global, s); return 0; @@ -2867,15 +2881,69 @@ static const struct file_operations hisi_sas_debugfs_global_fops = { .owner = THIS_MODULE, }; +static int hisi_sas_debugfs_axi_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_regs *axi = s->private; + struct hisi_hba *hisi_hba = axi->hisi_hba; + const struct hisi_sas_hw *hw = hisi_hba->hw; + const void *reg_axi = hw->debugfs_reg_array[DEBUGFS_AXI]; + + hisi_sas_debugfs_print_reg(axi->data, + reg_axi, s); + + return 0; +} + +static int hisi_sas_debugfs_axi_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_axi_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_axi_fops = { + .open = hisi_sas_debugfs_axi_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static int hisi_sas_debugfs_ras_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_regs *ras = s->private; + struct hisi_hba *hisi_hba = ras->hisi_hba; + const struct hisi_sas_hw *hw = hisi_hba->hw; + const void *reg_ras = hw->debugfs_reg_array[DEBUGFS_RAS]; + + hisi_sas_debugfs_print_reg(ras->data, + reg_ras, s); + + return 0; +} + +static int hisi_sas_debugfs_ras_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_ras_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_ras_fops = { + .open = hisi_sas_debugfs_ras_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + static int hisi_sas_debugfs_port_show(struct seq_file *s, void *p) { - struct hisi_sas_phy *phy = s->private; + struct hisi_sas_debugfs_port *port = s->private; + struct hisi_sas_phy *phy = port->phy; struct hisi_hba *hisi_hba = phy->hisi_hba; const struct hisi_sas_hw *hw = hisi_hba->hw; const struct hisi_sas_debugfs_reg *reg_port = hw->debugfs_reg_port; - u32 *databuf = hisi_hba->debugfs_port_reg[phy->sas_phy.id]; - hisi_sas_debugfs_print_reg(databuf, reg_port, s); + hisi_sas_debugfs_print_reg(port->data, reg_port, s); return 0; } @@ -2893,8 +2961,8 @@ static const struct file_operations hisi_sas_debugfs_port_fops = { .owner = THIS_MODULE, }; -static int hisi_sas_show_row_64(struct seq_file *s, int index, - int sz, __le64 *ptr) +static void hisi_sas_show_row_64(struct seq_file *s, int index, + int sz, __le64 *ptr) { int i; @@ -2907,12 +2975,10 @@ static int hisi_sas_show_row_64(struct seq_file *s, int index, } seq_puts(s, "\n"); - - return 0; } -static int hisi_sas_show_row_32(struct seq_file *s, int index, - int sz, __le32 *ptr) +static void 
hisi_sas_show_row_32(struct seq_file *s, int index, + int sz, __le32 *ptr) { int i; @@ -2924,32 +2990,28 @@ static int hisi_sas_show_row_32(struct seq_file *s, int index, seq_puts(s, "\n\t"); } seq_puts(s, "\n"); - - return 0; } -static int hisi_sas_cq_show_slot(struct seq_file *s, int slot, void *cq_ptr) +static void hisi_sas_cq_show_slot(struct seq_file *s, int slot, + struct hisi_sas_debugfs_cq *debugfs_cq) { - struct hisi_sas_cq *cq = cq_ptr; + struct hisi_sas_cq *cq = debugfs_cq->cq; struct hisi_hba *hisi_hba = cq->hisi_hba; - void *complete_queue = hisi_hba->debugfs_complete_hdr[cq->id]; - __le32 *complete_hdr = complete_queue + - (hisi_hba->hw->complete_hdr_size * slot); + __le32 *complete_hdr = debugfs_cq->complete_hdr + + (hisi_hba->hw->complete_hdr_size * slot); - return hisi_sas_show_row_32(s, slot, - hisi_hba->hw->complete_hdr_size, - complete_hdr); + hisi_sas_show_row_32(s, slot, + hisi_hba->hw->complete_hdr_size, + complete_hdr); } static int hisi_sas_debugfs_cq_show(struct seq_file *s, void *p) { - struct hisi_sas_cq *cq = s->private; - int slot, ret; + struct hisi_sas_debugfs_cq *debugfs_cq = s->private; + int slot; for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { - ret = hisi_sas_cq_show_slot(s, slot, cq); - if (ret) - return ret; + hisi_sas_cq_show_slot(s, slot, debugfs_cq); } return 0; } @@ -2967,26 +3029,22 @@ static const struct file_operations hisi_sas_debugfs_cq_fops = { .owner = THIS_MODULE, }; -static int hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr) +static void hisi_sas_dq_show_slot(struct seq_file *s, int slot, void *dq_ptr) { - struct hisi_sas_dq *dq = dq_ptr; - struct hisi_hba *hisi_hba = dq->hisi_hba; - void *cmd_queue = hisi_hba->debugfs_cmd_hdr[dq->id]; + struct hisi_sas_debugfs_dq *debugfs_dq = dq_ptr; + void *cmd_queue = debugfs_dq->hdr; __le32 *cmd_hdr = cmd_queue + sizeof(struct hisi_sas_cmd_hdr) * slot; - return hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), - cmd_hdr); + hisi_sas_show_row_32(s, slot, sizeof(struct hisi_sas_cmd_hdr), cmd_hdr); } static int hisi_sas_debugfs_dq_show(struct seq_file *s, void *p) { - int slot, ret; + int slot; for (slot = 0; slot < HISI_SAS_QUEUE_SLOTS; slot++) { - ret = hisi_sas_dq_show_slot(s, slot, s->private); - if (ret) - return ret; + hisi_sas_dq_show_slot(s, slot, s->private); } return 0; } @@ -3006,16 +3064,14 @@ static const struct file_operations hisi_sas_debugfs_dq_fops = { static int hisi_sas_debugfs_iost_show(struct seq_file *s, void *p) { - struct hisi_hba *hisi_hba = s->private; - struct hisi_sas_iost *debugfs_iost = hisi_hba->debugfs_iost; - int i, ret, max_command_entries = hisi_hba->hw->max_command_entries; - __le64 *iost = &debugfs_iost->qw0; + struct hisi_sas_debugfs_iost *debugfs_iost = s->private; + struct hisi_sas_iost *iost = debugfs_iost->iost; + int i, max_command_entries = HISI_SAS_MAX_COMMANDS; + + for (i = 0; i < max_command_entries; i++, iost++) { + __le64 *data = &iost->qw0; - for (i = 0; i < max_command_entries; i++, debugfs_iost++) { - ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_iost), - iost); - if (ret) - return ret; + hisi_sas_show_row_64(s, i, sizeof(*iost), data); } return 0; @@ -3034,18 +3090,55 @@ static const struct file_operations hisi_sas_debugfs_iost_fops = { .owner = THIS_MODULE, }; +static int hisi_sas_debugfs_iost_cache_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_iost_cache *debugfs_iost_cache = s->private; + struct hisi_sas_iost_itct_cache *iost_cache = debugfs_iost_cache->cache; + u32 cache_size = 
HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; + int i, tab_idx; + __le64 *iost; + + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, iost_cache++) { + /* + * Data struct of IOST cache: + * Data[1]: BIT0~15: Table index + * Bit16: Valid mask + * Data[2]~[9]: IOST table + */ + tab_idx = (iost_cache->data[1] & 0xffff); + iost = (__le64 *)iost_cache; + + hisi_sas_show_row_64(s, tab_idx, cache_size, iost); + } + + return 0; +} + +static int hisi_sas_debugfs_iost_cache_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_iost_cache_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_iost_cache_fops = { + .open = hisi_sas_debugfs_iost_cache_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + static int hisi_sas_debugfs_itct_show(struct seq_file *s, void *p) { - int i, ret; - struct hisi_hba *hisi_hba = s->private; - struct hisi_sas_itct *debugfs_itct = hisi_hba->debugfs_itct; - __le64 *itct = &debugfs_itct->qw0; + int i; + struct hisi_sas_debugfs_itct *debugfs_itct = s->private; + struct hisi_sas_itct *itct = debugfs_itct->itct; - for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, debugfs_itct++) { - ret = hisi_sas_show_row_64(s, i, sizeof(*debugfs_itct), - itct); - if (ret) - return ret; + for (i = 0; i < HISI_SAS_MAX_ITCT_ENTRIES; i++, itct++) { + __le64 *data = &itct->qw0; + + hisi_sas_show_row_64(s, i, sizeof(*itct), data); } return 0; @@ -3064,8 +3157,49 @@ static const struct file_operations hisi_sas_debugfs_itct_fops = { .owner = THIS_MODULE, }; +static int hisi_sas_debugfs_itct_cache_show(struct seq_file *s, void *p) +{ + struct hisi_sas_debugfs_itct_cache *debugfs_itct_cache = s->private; + struct hisi_sas_iost_itct_cache *itct_cache = debugfs_itct_cache->cache; + u32 cache_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * 4; + int i, tab_idx; + __le64 *itct; + + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_NUM; i++, itct_cache++) { + /* + * Data struct of ITCT cache: + * Data[1]: BIT0~15: Table index + * Bit16: Valid mask + * Data[2]~[9]: ITCT table + */ + tab_idx = itct_cache->data[1] & 0xffff; + itct = (__le64 *)itct_cache; + + hisi_sas_show_row_64(s, tab_idx, cache_size, itct); + } + + return 0; +} + +static int hisi_sas_debugfs_itct_cache_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_itct_cache_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_itct_cache_fops = { + .open = hisi_sas_debugfs_itct_cache_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) { + u64 *debugfs_timestamp; + int dump_index = hisi_hba->debugfs_dump_index; struct dentry *dump_dentry; struct dentry *dentry; char name[256]; @@ -3073,19 +3207,26 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) int c; int d; - /* Create dump dir inside device dir */ - dump_dentry = debugfs_create_dir("dump", hisi_hba->debugfs_dir); - hisi_hba->debugfs_dump_dentry = dump_dentry; + snprintf(name, 256, "%d", dump_index); + + dump_dentry = debugfs_create_dir(name, hisi_hba->debugfs_dump_dentry); + + debugfs_timestamp = &hisi_hba->debugfs_timestamp[dump_index]; - debugfs_create_file("global", 0400, dump_dentry, hisi_hba, - &hisi_sas_debugfs_global_fops); + debugfs_create_u64("timestamp", 0400, dump_dentry, + debugfs_timestamp); + + debugfs_create_file("global", 0400, dump_dentry, + 
&hisi_hba->debugfs_regs[dump_index][DEBUGFS_GLOBAL], + &hisi_sas_debugfs_global_fops); /* Create port dir and files */ dentry = debugfs_create_dir("port", dump_dentry); for (p = 0; p < hisi_hba->n_phy; p++) { snprintf(name, 256, "%d", p); - debugfs_create_file(name, 0400, dentry, &hisi_hba->phy[p], + debugfs_create_file(name, 0400, dentry, + &hisi_hba->debugfs_port_reg[dump_index][p], &hisi_sas_debugfs_port_fops); } @@ -3094,7 +3235,8 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) for (c = 0; c < hisi_hba->queue_count; c++) { snprintf(name, 256, "%d", c); - debugfs_create_file(name, 0400, dentry, &hisi_hba->cq[c], + debugfs_create_file(name, 0400, dentry, + &hisi_hba->debugfs_cq[dump_index][c], &hisi_sas_debugfs_cq_fops); } @@ -3103,16 +3245,35 @@ static void hisi_sas_debugfs_create_files(struct hisi_hba *hisi_hba) for (d = 0; d < hisi_hba->queue_count; d++) { snprintf(name, 256, "%d", d); - debugfs_create_file(name, 0400, dentry, &hisi_hba->dq[d], + debugfs_create_file(name, 0400, dentry, + &hisi_hba->debugfs_dq[dump_index][d], &hisi_sas_debugfs_dq_fops); } - debugfs_create_file("iost", 0400, dump_dentry, hisi_hba, + debugfs_create_file("iost", 0400, dump_dentry, + &hisi_hba->debugfs_iost[dump_index], &hisi_sas_debugfs_iost_fops); - debugfs_create_file("itct", 0400, dump_dentry, hisi_hba, + debugfs_create_file("iost_cache", 0400, dump_dentry, + &hisi_hba->debugfs_iost_cache[dump_index], + &hisi_sas_debugfs_iost_cache_fops); + + debugfs_create_file("itct", 0400, dump_dentry, + &hisi_hba->debugfs_itct[dump_index], &hisi_sas_debugfs_itct_fops); + debugfs_create_file("itct_cache", 0400, dump_dentry, + &hisi_hba->debugfs_itct_cache[dump_index], + &hisi_sas_debugfs_itct_cache_fops); + + debugfs_create_file("axi", 0400, dump_dentry, + &hisi_hba->debugfs_regs[dump_index][DEBUGFS_AXI], + &hisi_sas_debugfs_axi_fops); + + debugfs_create_file("ras", 0400, dump_dentry, + &hisi_hba->debugfs_regs[dump_index][DEBUGFS_RAS], + &hisi_sas_debugfs_ras_fops); + return; } @@ -3122,6 +3283,8 @@ static void hisi_sas_debugfs_snapshot_regs(struct hisi_hba *hisi_hba) hisi_sas_debugfs_snapshot_global_reg(hisi_hba); hisi_sas_debugfs_snapshot_port_reg(hisi_hba); + hisi_sas_debugfs_snapshot_axi_reg(hisi_hba); + hisi_sas_debugfs_snapshot_ras_reg(hisi_hba); hisi_sas_debugfs_snapshot_cq_reg(hisi_hba); hisi_sas_debugfs_snapshot_dq_reg(hisi_hba); hisi_sas_debugfs_snapshot_itct_reg(hisi_hba); @@ -3139,8 +3302,7 @@ static ssize_t hisi_sas_debugfs_trigger_dump_write(struct file *file, struct hisi_hba *hisi_hba = file->f_inode->i_private; char buf[8]; - /* A bit racy, but don't care too much since it's only debugfs */ - if (hisi_hba->debugfs_snapshot) + if (hisi_hba->debugfs_dump_index >= hisi_sas_debugfs_dump_count) return -EFAULT; if (count > 8) @@ -3162,102 +3324,632 @@ static const struct file_operations hisi_sas_debugfs_trigger_dump_fops = { .owner = THIS_MODULE, }; +enum { + HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL = 0, + HISI_SAS_BIST_LOOPBACK_MODE_SERDES, + HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, +}; + +enum { + HISI_SAS_BIST_CODE_MODE_PRBS7 = 0, + HISI_SAS_BIST_CODE_MODE_PRBS23, + HISI_SAS_BIST_CODE_MODE_PRBS31, + HISI_SAS_BIST_CODE_MODE_JTPAT, + HISI_SAS_BIST_CODE_MODE_CJTPAT, + HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, + HISI_SAS_BIST_CODE_MODE_TRAIN, + HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, + HISI_SAS_BIST_CODE_MODE_HFTP, + HISI_SAS_BIST_CODE_MODE_MFTP, + HISI_SAS_BIST_CODE_MODE_LFTP, + HISI_SAS_BIST_CODE_MODE_FIXED_DATA, +}; + +static const struct { + int value; + char *name; +} 
hisi_sas_debugfs_loop_linkrate[] = { + { SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" }, + { SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" }, + { SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" }, + { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" }, +}; + +static int hisi_sas_debugfs_bist_linkrate_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) { + int match = (hisi_hba->debugfs_bist_linkrate == + hisi_sas_debugfs_loop_linkrate[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + hisi_sas_debugfs_loop_linkrate[i].name, + match ? "]" : ""); + } + seq_puts(s, "\n"); + + return 0; +} + +static ssize_t hisi_sas_debugfs_bist_linkrate_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + if (count >= sizeof(kbuf)) + return -EOVERFLOW; + + if (copy_from_user(kbuf, buf, count)) + return -EINVAL; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_linkrate); i++) { + if (!strncmp(hisi_sas_debugfs_loop_linkrate[i].name, + pkbuf, 16)) { + hisi_hba->debugfs_bist_linkrate = + hisi_sas_debugfs_loop_linkrate[i].value; + found = true; + break; + } + } + + if (!found) + return -EINVAL; + + return count; +} + +static int hisi_sas_debugfs_bist_linkrate_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_linkrate_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_linkrate_ops = { + .open = hisi_sas_debugfs_bist_linkrate_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_linkrate_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct { + int value; + char *name; +} hisi_sas_debugfs_loop_code_mode[] = { + { HISI_SAS_BIST_CODE_MODE_PRBS7, "PRBS7" }, + { HISI_SAS_BIST_CODE_MODE_PRBS23, "PRBS23" }, + { HISI_SAS_BIST_CODE_MODE_PRBS31, "PRBS31" }, + { HISI_SAS_BIST_CODE_MODE_JTPAT, "JTPAT" }, + { HISI_SAS_BIST_CODE_MODE_CJTPAT, "CJTPAT" }, + { HISI_SAS_BIST_CODE_MODE_SCRAMBED_0, "SCRAMBED_0" }, + { HISI_SAS_BIST_CODE_MODE_TRAIN, "TRAIN" }, + { HISI_SAS_BIST_CODE_MODE_TRAIN_DONE, "TRAIN_DONE" }, + { HISI_SAS_BIST_CODE_MODE_HFTP, "HFTP" }, + { HISI_SAS_BIST_CODE_MODE_MFTP, "MFTP" }, + { HISI_SAS_BIST_CODE_MODE_LFTP, "LFTP" }, + { HISI_SAS_BIST_CODE_MODE_FIXED_DATA, "FIXED_DATA" }, +}; + +static int hisi_sas_debugfs_bist_code_mode_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) { + int match = (hisi_hba->debugfs_bist_code_mode == + hisi_sas_debugfs_loop_code_mode[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + hisi_sas_debugfs_loop_code_mode[i].name, + match ? 
"]" : ""); + } + seq_puts(s, "\n"); + + return 0; +} + +static ssize_t hisi_sas_debugfs_bist_code_mode_write(struct file *filp, + const char __user *buf, + size_t count, + loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EOVERFLOW; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_code_mode); i++) { + if (!strncmp(hisi_sas_debugfs_loop_code_mode[i].name, + pkbuf, 16)) { + hisi_hba->debugfs_bist_code_mode = + hisi_sas_debugfs_loop_code_mode[i].value; + found = true; + break; + } + } + + if (!found) + return -EINVAL; + + return count; +} + +static int hisi_sas_debugfs_bist_code_mode_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_code_mode_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_code_mode_ops = { + .open = hisi_sas_debugfs_bist_code_mode_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_code_mode_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static ssize_t hisi_sas_debugfs_bist_phy_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + unsigned int phy_no; + int val; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + val = kstrtouint_from_user(buf, count, 0, &phy_no); + if (val) + return val; + + if (phy_no >= hisi_hba->n_phy) + return -EINVAL; + + hisi_hba->debugfs_bist_phy_no = phy_no; + + return count; +} + +static int hisi_sas_debugfs_bist_phy_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%d\n", hisi_hba->debugfs_bist_phy_no); + + return 0; +} + +static int hisi_sas_debugfs_bist_phy_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_phy_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_phy_ops = { + .open = hisi_sas_debugfs_bist_phy_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_phy_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static const struct { + int value; + char *name; +} hisi_sas_debugfs_loop_modes[] = { + { HISI_SAS_BIST_LOOPBACK_MODE_DIGITAL, "digital" }, + { HISI_SAS_BIST_LOOPBACK_MODE_SERDES, "serdes" }, + { HISI_SAS_BIST_LOOPBACK_MODE_REMOTE, "remote" }, +}; + +static int hisi_sas_debugfs_bist_mode_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + int i; + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) { + int match = (hisi_hba->debugfs_bist_mode == + hisi_sas_debugfs_loop_modes[i].value); + + seq_printf(s, "%s%s%s ", match ? "[" : "", + hisi_sas_debugfs_loop_modes[i].name, + match ? 
"]" : ""); + } + seq_puts(s, "\n"); + + return 0; +} + +static ssize_t hisi_sas_debugfs_bist_mode_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + char kbuf[16] = {}, *pkbuf; + bool found = false; + int i; + + if (hisi_hba->debugfs_bist_enable) + return -EPERM; + + if (count >= sizeof(kbuf)) + return -EINVAL; + + if (copy_from_user(kbuf, buf, count)) + return -EOVERFLOW; + + pkbuf = strstrip(kbuf); + + for (i = 0; i < ARRAY_SIZE(hisi_sas_debugfs_loop_modes); i++) { + if (!strncmp(hisi_sas_debugfs_loop_modes[i].name, pkbuf, 16)) { + hisi_hba->debugfs_bist_mode = + hisi_sas_debugfs_loop_modes[i].value; + found = true; + break; + } + } + + if (!found) + return -EINVAL; + + return count; +} + +static int hisi_sas_debugfs_bist_mode_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_mode_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_mode_ops = { + .open = hisi_sas_debugfs_bist_mode_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_mode_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static ssize_t hisi_sas_debugfs_bist_enable_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct hisi_hba *hisi_hba = m->private; + unsigned int enable; + int val; + + val = kstrtouint_from_user(buf, count, 0, &enable); + if (val) + return val; + + if (enable > 1) + return -EINVAL; + + if (enable == hisi_hba->debugfs_bist_enable) + return count; + + if (!hisi_hba->hw->set_bist) + return -EPERM; + + val = hisi_hba->hw->set_bist(hisi_hba, enable); + if (val < 0) + return val; + + hisi_hba->debugfs_bist_enable = enable; + + return count; +} + +static int hisi_sas_debugfs_bist_enable_show(struct seq_file *s, void *p) +{ + struct hisi_hba *hisi_hba = s->private; + + seq_printf(s, "%d\n", hisi_hba->debugfs_bist_enable); + + return 0; +} + +static int hisi_sas_debugfs_bist_enable_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_bist_enable_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_bist_enable_ops = { + .open = hisi_sas_debugfs_bist_enable_open, + .read = seq_read, + .write = hisi_sas_debugfs_bist_enable_write, + .llseek = seq_lseek, + .release = single_release, + .owner = THIS_MODULE, +}; + +static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp, + const char __user *buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = filp->private_data; + struct hisi_sas_phy *phy = s->private; + unsigned int set_val; + int res; + + res = kstrtouint_from_user(buf, count, 0, &set_val); + if (res) + return res; + + if (set_val > 0) + return -EINVAL; + + atomic_set(&phy->down_cnt, 0); + + return count; +} + +static int hisi_sas_debugfs_phy_down_cnt_show(struct seq_file *s, void *p) +{ + struct hisi_sas_phy *phy = s->private; + + seq_printf(s, "%d\n", atomic_read(&phy->down_cnt)); + + return 0; +} + +static int hisi_sas_debugfs_phy_down_cnt_open(struct inode *inode, + struct file *filp) +{ + return single_open(filp, hisi_sas_debugfs_phy_down_cnt_show, + inode->i_private); +} + +static const struct file_operations hisi_sas_debugfs_phy_down_cnt_ops = { + .open = hisi_sas_debugfs_phy_down_cnt_open, + .read = seq_read, + .write = hisi_sas_debugfs_phy_down_cnt_write, + .llseek = seq_lseek, + 
.release = single_release, + .owner = THIS_MODULE, +}; + void hisi_sas_debugfs_work_handler(struct work_struct *work) { struct hisi_hba *hisi_hba = container_of(work, struct hisi_hba, debugfs_work); + int debugfs_dump_index = hisi_hba->debugfs_dump_index; + struct device *dev = hisi_hba->dev; + u64 timestamp = local_clock(); - if (hisi_hba->debugfs_snapshot) + if (debugfs_dump_index >= hisi_sas_debugfs_dump_count) { + dev_warn(dev, "dump count exceeded!\n"); return; - hisi_hba->debugfs_snapshot = true; + } + + do_div(timestamp, NSEC_PER_MSEC); + hisi_hba->debugfs_timestamp[debugfs_dump_index] = timestamp; hisi_sas_debugfs_snapshot_regs(hisi_hba); + hisi_hba->debugfs_dump_index++; } EXPORT_SYMBOL_GPL(hisi_sas_debugfs_work_handler); -void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) +static void hisi_sas_debugfs_release(struct hisi_hba *hisi_hba, int dump_index) { - int max_command_entries = hisi_hba->hw->max_command_entries; struct device *dev = hisi_hba->dev; - int p, i, c, d; - size_t sz; + int i; - hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), - hisi_sas_debugfs_dir); - debugfs_create_file("trigger_dump", 0600, - hisi_hba->debugfs_dir, - hisi_hba, - &hisi_sas_debugfs_trigger_dump_fops); + devm_kfree(dev, hisi_hba->debugfs_iost_cache[dump_index].cache); + devm_kfree(dev, hisi_hba->debugfs_itct_cache[dump_index].cache); + devm_kfree(dev, hisi_hba->debugfs_iost[dump_index].iost); + devm_kfree(dev, hisi_hba->debugfs_itct[dump_index].itct); - /* Alloc buffer for global */ - sz = hisi_hba->hw->debugfs_reg_global->count * 4; - hisi_hba->debugfs_global_reg = - devm_kmalloc(dev, sz, GFP_KERNEL); + for (i = 0; i < hisi_hba->queue_count; i++) + devm_kfree(dev, hisi_hba->debugfs_dq[dump_index][i].hdr); + + for (i = 0; i < hisi_hba->queue_count; i++) + devm_kfree(dev, + hisi_hba->debugfs_cq[dump_index][i].complete_hdr); - if (!hisi_hba->debugfs_global_reg) - goto fail_global; + for (i = 0; i < DEBUGFS_REGS_NUM; i++) + devm_kfree(dev, hisi_hba->debugfs_regs[dump_index][i].data); - /* Alloc buffer for port */ - sz = hisi_hba->hw->debugfs_reg_port->count * 4; + for (i = 0; i < hisi_hba->n_phy; i++) + devm_kfree(dev, hisi_hba->debugfs_port_reg[dump_index][i].data); +} + +static int hisi_sas_debugfs_alloc(struct hisi_hba *hisi_hba, int dump_index) +{ + const struct hisi_sas_hw *hw = hisi_hba->hw; + struct device *dev = hisi_hba->dev; + int p, c, d, r, i; + size_t sz; + + for (r = 0; r < DEBUGFS_REGS_NUM; r++) { + struct hisi_sas_debugfs_regs *regs = + &hisi_hba->debugfs_regs[dump_index][r]; + + sz = hw->debugfs_reg_array[r]->count * 4; + regs->data = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!regs->data) + goto fail; + regs->hisi_hba = hisi_hba; + } + + sz = hw->debugfs_reg_port->count * 4; for (p = 0; p < hisi_hba->n_phy; p++) { - hisi_hba->debugfs_port_reg[p] = - devm_kmalloc(dev, sz, GFP_KERNEL); + struct hisi_sas_debugfs_port *port = + &hisi_hba->debugfs_port_reg[dump_index][p]; - if (!hisi_hba->debugfs_port_reg[p]) - goto fail_port; + port->data = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!port->data) + goto fail; + port->phy = &hisi_hba->phy[p]; } - /* Alloc buffer for cq */ - sz = hisi_hba->hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; + sz = hw->complete_hdr_size * HISI_SAS_QUEUE_SLOTS; for (c = 0; c < hisi_hba->queue_count; c++) { - hisi_hba->debugfs_complete_hdr[c] = - devm_kmalloc(dev, sz, GFP_KERNEL); + struct hisi_sas_debugfs_cq *cq = + &hisi_hba->debugfs_cq[dump_index][c]; - if (!hisi_hba->debugfs_complete_hdr[c]) - goto fail_cq; + cq->complete_hdr = devm_kmalloc(dev, sz, 
GFP_KERNEL); + if (!cq->complete_hdr) + goto fail; + cq->cq = &hisi_hba->cq[c]; } - /* Alloc buffer for dq */ sz = sizeof(struct hisi_sas_cmd_hdr) * HISI_SAS_QUEUE_SLOTS; for (d = 0; d < hisi_hba->queue_count; d++) { - hisi_hba->debugfs_cmd_hdr[d] = - devm_kmalloc(dev, sz, GFP_KERNEL); + struct hisi_sas_debugfs_dq *dq = + &hisi_hba->debugfs_dq[dump_index][d]; - if (!hisi_hba->debugfs_cmd_hdr[d]) - goto fail_iost_dq; + dq->hdr = devm_kmalloc(dev, sz, GFP_KERNEL); + if (!dq->hdr) + goto fail; + dq->dq = &hisi_hba->dq[d]; } - /* Alloc buffer for iost */ - sz = max_command_entries * sizeof(struct hisi_sas_iost); + sz = HISI_SAS_MAX_COMMANDS * sizeof(struct hisi_sas_iost); + + hisi_hba->debugfs_iost[dump_index].iost = + devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_iost[dump_index].iost) + goto fail; + + sz = HISI_SAS_IOST_ITCT_CACHE_NUM * + sizeof(struct hisi_sas_iost_itct_cache); + + hisi_hba->debugfs_iost_cache[dump_index].cache = + devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_iost_cache[dump_index].cache) + goto fail; + + sz = HISI_SAS_IOST_ITCT_CACHE_NUM * + sizeof(struct hisi_sas_iost_itct_cache); - hisi_hba->debugfs_iost = devm_kmalloc(dev, sz, GFP_KERNEL); - if (!hisi_hba->debugfs_iost) - goto fail_iost_dq; + hisi_hba->debugfs_itct_cache[dump_index].cache = + devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_itct_cache[dump_index].cache) + goto fail; - /* Alloc buffer for itct */ /* New memory allocation must be locate before itct */ sz = HISI_SAS_MAX_ITCT_ENTRIES * sizeof(struct hisi_sas_itct); - hisi_hba->debugfs_itct = devm_kmalloc(dev, sz, GFP_KERNEL); - if (!hisi_hba->debugfs_itct) - goto fail_itct; + hisi_hba->debugfs_itct[dump_index].itct = + devm_kmalloc(dev, sz, GFP_KERNEL); + if (!hisi_hba->debugfs_itct[dump_index].itct) + goto fail; - return; -fail_itct: - devm_kfree(dev, hisi_hba->debugfs_iost); -fail_iost_dq: - for (i = 0; i < d; i++) - devm_kfree(dev, hisi_hba->debugfs_cmd_hdr[i]); -fail_cq: - for (i = 0; i < c; i++) - devm_kfree(dev, hisi_hba->debugfs_complete_hdr[i]); -fail_port: - for (i = 0; i < p; i++) - devm_kfree(dev, hisi_hba->debugfs_port_reg[i]); - devm_kfree(dev, hisi_hba->debugfs_global_reg); -fail_global: - debugfs_remove_recursive(hisi_hba->debugfs_dir); - dev_dbg(dev, "failed to init debugfs!\n"); + return 0; +fail: + for (i = 0; i < hisi_sas_debugfs_dump_count; i++) + hisi_sas_debugfs_release(hisi_hba, i); + return -ENOMEM; +} + +static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba) +{ + struct dentry *dir = debugfs_create_dir("phy_down_cnt", + hisi_hba->debugfs_dir); + char name[16]; + int phy_no; + + for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) { + snprintf(name, 16, "%d", phy_no); + debugfs_create_file(name, 0600, dir, + &hisi_hba->phy[phy_no], + &hisi_sas_debugfs_phy_down_cnt_ops); + } +} + +static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba) +{ + hisi_hba->debugfs_bist_dentry = + debugfs_create_dir("bist", hisi_hba->debugfs_dir); + debugfs_create_file("link_rate", 0600, + hisi_hba->debugfs_bist_dentry, hisi_hba, + &hisi_sas_debugfs_bist_linkrate_ops); + + debugfs_create_file("code_mode", 0600, + hisi_hba->debugfs_bist_dentry, hisi_hba, + &hisi_sas_debugfs_bist_code_mode_ops); + + debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry, + hisi_hba, &hisi_sas_debugfs_bist_phy_ops); + + debugfs_create_u32("cnt", 0600, hisi_hba->debugfs_bist_dentry, + &hisi_hba->debugfs_bist_cnt); + + debugfs_create_file("loopback_mode", 0600, + hisi_hba->debugfs_bist_dentry, + 
hisi_hba, &hisi_sas_debugfs_bist_mode_ops); + + debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry, + hisi_hba, &hisi_sas_debugfs_bist_enable_ops); + + hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS; +} + +void hisi_sas_debugfs_init(struct hisi_hba *hisi_hba) +{ + struct device *dev = hisi_hba->dev; + int i; + + hisi_hba->debugfs_dir = debugfs_create_dir(dev_name(dev), + hisi_sas_debugfs_dir); + debugfs_create_file("trigger_dump", 0200, + hisi_hba->debugfs_dir, + hisi_hba, + &hisi_sas_debugfs_trigger_dump_fops); + + /* create bist structures */ + hisi_sas_debugfs_bist_init(hisi_hba); + + hisi_hba->debugfs_dump_dentry = + debugfs_create_dir("dump", hisi_hba->debugfs_dir); + + hisi_sas_debugfs_phy_down_cnt_init(hisi_hba); + + for (i = 0; i < hisi_sas_debugfs_dump_count; i++) { + if (hisi_sas_debugfs_alloc(hisi_hba, i)) { + debugfs_remove_recursive(hisi_hba->debugfs_dir); + dev_dbg(dev, "failed to init debugfs!\n"); + break; + } + } } EXPORT_SYMBOL_GPL(hisi_sas_debugfs_init); @@ -3290,14 +3982,24 @@ EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable); module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444); MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)"); +u32 hisi_sas_debugfs_dump_count = 1; +EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count); +module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444); +MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow"); + static __init int hisi_sas_init(void) { hisi_sas_stt = sas_domain_attach_transport(&hisi_sas_transport_ops); if (!hisi_sas_stt) return -ENOMEM; - if (hisi_sas_debugfs_enable) + if (hisi_sas_debugfs_enable) { hisi_sas_debugfs_dir = debugfs_create_dir("hisi_sas", NULL); + if (hisi_sas_debugfs_dump_count > HISI_SAS_MAX_DEBUGFS_DUMP) { + pr_info("hisi_sas: Limiting debugfs dump count\n"); + hisi_sas_debugfs_dump_count = HISI_SAS_MAX_DEBUGFS_DUMP; + } + } return 0; } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 3912216e8a4f..fa25766502a2 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -401,8 +401,6 @@ enum { TRANS_RX_SMP_RESP_TIMEOUT_ERR, /* 0x31a */ }; -#define HISI_SAS_COMMAND_ENTRIES_V1_HW 8192 - #define HISI_SAS_PHY_MAX_INT_NR (HISI_SAS_PHY_INT_NR * HISI_SAS_MAX_PHYS) #define HISI_SAS_CQ_MAX_INT_NR (HISI_SAS_MAX_QUEUES) #define HISI_SAS_FATAL_INT_NR (2) @@ -418,13 +416,6 @@ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) return readl(regs); } -static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) -{ - void __iomem *regs = hisi_hba->regs + off; - - return readl_relaxed(regs); -} - static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { @@ -540,8 +531,8 @@ static void setup_itct_v1_hw(struct hisi_hba *hisi_hba, (0xff00ULL << ITCT_HDR_REJ_OPEN_TL_OFF)); } -static void clear_itct_v1_hw(struct hisi_hba *hisi_hba, - struct hisi_sas_device *sas_dev) +static int clear_itct_v1_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) { u64 dev_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; @@ -560,6 +551,8 @@ static void clear_itct_v1_hw(struct hisi_hba *hisi_hba, qw0 = le64_to_cpu(itct->qw0); qw0 &= ~ITCT_HDR_VALID_MSK; itct->qw0 = cpu_to_le64(qw0); + + return 0; } static int reset_hw_v1_hw(struct hisi_hba *hisi_hba) @@ -866,30 +859,6 @@ static int get_wideport_bitmap_v1_hw(struct hisi_hba *hisi_hba, int port_id) return bitmap; } -/* - * 
The callpath to this function and upto writing the write - * queue pointer should be safe from interruption. - */ -static int -get_free_slot_v1_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) -{ - struct device *dev = hisi_hba->dev; - int queue = dq->id; - u32 r, w; - - w = dq->wr_point; - r = hisi_sas_read32_relaxed(hisi_hba, - DLVRY_Q_0_RD_PTR + (queue * 0x14)); - if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - dev_warn(dev, "could not find free slot\n"); - return -EAGAIN; - } - - dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; - - return w; -} - /* DQ lock must be taken here */ static void start_delivery_v1_hw(struct hisi_sas_dq *dq) { @@ -1308,21 +1277,17 @@ static int slot_complete_v1_hw(struct hisi_hba *hisi_hba, } case SAS_PROTOCOL_SMP: { - void *to; struct scatterlist *sg_resp = &task->smp_task.smp_resp; + void *to = page_address(sg_page(sg_resp)); ts->stat = SAM_STAT_GOOD; - to = kmap_atomic(sg_page(sg_resp)); - dma_unmap_sg(dev, &task->smp_task.smp_resp, 1, - DMA_FROM_DEVICE); dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); memcpy(to + sg_resp->offset, hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), - sg_dma_len(sg_resp)); - kunmap_atomic(to); + sg_resp->length); break; } case SAS_PROTOCOL_SATA: @@ -1534,11 +1499,9 @@ static irqreturn_t cq_interrupt_v1_hw(int irq, void *p) struct hisi_sas_complete_v1_hdr *complete_queue = (struct hisi_sas_complete_v1_hdr *) hisi_hba->complete_hdr[queue]; - u32 irq_value, rd_point = cq->rd_point, wr_point; + u32 rd_point = cq->rd_point, wr_point; spin_lock(&hisi_hba->lock); - irq_value = hisi_sas_read32(hisi_hba, OQ_INT_SRC); - hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); wr_point = hisi_sas_read32(hisi_hba, COMPL_Q_0_WR_PTR + (0x14 * queue)); @@ -1809,6 +1772,9 @@ static struct scsi_host_template sht_v1_hw = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = host_attrs_v1_hw, .host_reset = hisi_sas_host_reset, }; @@ -1820,9 +1786,7 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = { .clear_itct = clear_itct_v1_hw, .prep_smp = prep_smp_v1_hw, .prep_ssp = prep_ssp_v1_hw, - .get_free_slot = get_free_slot_v1_hw, .start_delivery = start_delivery_v1_hw, - .slot_complete = slot_complete_v1_hw, .phys_init = phys_init_v1_hw, .phy_start = start_phy_v1_hw, .phy_disable = disable_phy_v1_hw, @@ -1830,7 +1794,6 @@ static const struct hisi_sas_hw hisi_sas_v1_hw = { .phy_set_linkrate = phy_set_linkrate_v1_hw, .phy_get_max_linkrate = phy_get_max_linkrate_v1_hw, .get_wideport_bitmap = get_wideport_bitmap_v1_hw, - .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V1_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v1_hdr), .sht = &sht_v1_hw, }; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index e9b15d45f98f..e05faf315dcd 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -773,7 +773,6 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_device *sas_dev = device->lldd_dev; int sata_idx = sas_dev->sata_idx; int start, end; - unsigned long flags; if (!sata_dev) { /* @@ -797,12 +796,12 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, end = 64 * (sata_idx + 2); } - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock(&hisi_hba->lock); while (1) { start = find_next_zero_bit(bitmap, hisi_hba->slot_index_count, start); if (start >= end) { - 
spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return -SAS_QUEUE_FULL; } /* @@ -814,7 +813,7 @@ slot_index_alloc_quirk_v2_hw(struct hisi_hba *hisi_hba, } set_bit(start, bitmap); - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return start; } @@ -843,9 +842,8 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) struct hisi_sas_device *sas_dev = NULL; int i, sata_dev = dev_is_sata(device); int sata_idx = -1; - unsigned long flags; - spin_lock_irqsave(&hisi_hba->lock, flags); + spin_lock(&hisi_hba->lock); if (sata_dev) if (!sata_index_alloc_v2_hw(hisi_hba, &sata_idx)) @@ -876,7 +874,7 @@ hisi_sas_device *alloc_dev_quirk_v2_hw(struct domain_device *device) } out: - spin_unlock_irqrestore(&hisi_hba->lock, flags); + spin_unlock(&hisi_hba->lock); return sas_dev; } @@ -974,13 +972,14 @@ static void setup_itct_v2_hw(struct hisi_hba *hisi_hba, (0x1ULL << ITCT_HDR_RTOLT_OFF)); } -static void clear_itct_v2_hw(struct hisi_hba *hisi_hba, - struct hisi_sas_device *sas_dev) +static int clear_itct_v2_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) { DECLARE_COMPLETION_ONSTACK(completion); u64 dev_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + struct device *dev = hisi_hba->dev; int i; sas_dev->completion = &completion; @@ -990,13 +989,19 @@ static void clear_itct_v2_hw(struct hisi_hba *hisi_hba, hisi_sas_write32(hisi_hba, ENT_INT_SRC3, ENT_INT_SRC3_ITC_INT_MSK); + /* need to set register twice to clear ITCT for v2 hw */ for (i = 0; i < 2; i++) { reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); - wait_for_completion(sas_dev->completion); + if (!wait_for_completion_timeout(sas_dev->completion, + CLEAR_ITCT_TIMEOUT * HZ)) { + dev_warn(dev, "failed to clear ITCT\n"); + return -ETIMEDOUT; + } memset(itct, 0, sizeof(struct hisi_sas_itct)); } + return 0; } static void free_device_v2_hw(struct hisi_sas_device *sas_dev) @@ -1637,31 +1642,6 @@ static int get_wideport_bitmap_v2_hw(struct hisi_hba *hisi_hba, int port_id) return bitmap; } -/* - * The callpath to this function and upto writing the write - * queue pointer should be safe from interruption. 
- */ -static int -get_free_slot_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) -{ - struct device *dev = hisi_hba->dev; - int queue = dq->id; - u32 r, w; - - w = dq->wr_point; - r = hisi_sas_read32_relaxed(hisi_hba, - DLVRY_Q_0_RD_PTR + (queue * 0x14)); - if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - dev_warn(dev, "full queue=%d r=%d w=%d\n", - queue, r, w); - return -EAGAIN; - } - - dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; - - return w; -} - /* DQ lock must be taken here */ static void start_delivery_v2_hw(struct hisi_sas_dq *dq) { @@ -2418,7 +2398,7 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) slot_err_v2_hw(hisi_hba, task, slot, 2); if (ts->stat != SAS_DATA_UNDERRUN) - dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", + dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", slot->idx, task, sas_dev->device_id, complete_hdr->dw0, complete_hdr->dw1, complete_hdr->act, complete_hdr->dw3, @@ -2444,20 +2424,16 @@ slot_complete_v2_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) case SAS_PROTOCOL_SMP: { struct scatterlist *sg_resp = &task->smp_task.smp_resp; - void *to; + void *to = page_address(sg_page(sg_resp)); ts->stat = SAM_STAT_GOOD; - to = kmap_atomic(sg_page(sg_resp)); - dma_unmap_sg(dev, &task->smp_task.smp_resp, 1, - DMA_FROM_DEVICE); dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); memcpy(to + sg_resp->offset, hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), - sg_dma_len(sg_resp)); - kunmap_atomic(to); + sg_resp->length); break; } case SAS_PROTOCOL_SATA: @@ -2484,7 +2460,7 @@ out: spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { spin_unlock_irqrestore(&task->task_state_lock, flags); - dev_info(dev, "slot complete: task(%p) aborted\n", task); + dev_info(dev, "slot complete: task(%pK) aborted\n", task); return SAS_ABORTED_TASK; } task->task_state_flags |= SAS_TASK_STATE_DONE; @@ -2495,7 +2471,7 @@ out: spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); - dev_info(dev, "slot complete: task(%p) ignored\n", + dev_info(dev, "slot complete: task(%pK) ignored\n", task); return sts; } @@ -2563,7 +2539,10 @@ static void prep_ata_v2_hw(struct hisi_hba *hisi_hba, hdr->dw1 = cpu_to_le32(dw1); /* dw2 */ - if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) { + if (task->ata_task.use_ncq) { + struct ata_queued_cmd *qc = task->uldd_task; + + hdr_tag = qc->tag; task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } @@ -3130,9 +3109,9 @@ static irqreturn_t fatal_axi_int_v2_hw(int irq_no, void *p) return IRQ_HANDLED; } -static void cq_tasklet_v2_hw(unsigned long val) +static irqreturn_t cq_thread_v2_hw(int irq_no, void *p) { - struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; + struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; struct hisi_sas_slot *slot; struct hisi_sas_itct *itct; @@ -3200,6 +3179,8 @@ static void cq_tasklet_v2_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + + return IRQ_HANDLED; } static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) @@ -3210,9 +3191,7 @@ static irqreturn_t cq_interrupt_v2_hw(int irq_no, void *p) 
hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); - tasklet_schedule(&cq->tasklet); - - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } static irqreturn_t sata_int_v2_hw(int irq_no, void *p) @@ -3333,8 +3312,8 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) { struct platform_device *pdev = hisi_hba->platform_dev; struct device *dev = &pdev->dev; - int irq, rc, irq_map[128]; - int i, phy_no, fatal_no, queue_no, k; + int irq, rc = 0, irq_map[128]; + int i, phy_no, fatal_no, queue_no; for (i = 0; i < 128; i++) irq_map[i] = platform_get_irq(pdev, i); @@ -3347,7 +3326,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n", irq, rc); rc = -ENOENT; - goto free_phy_int_irqs; + goto err_out; } } @@ -3361,7 +3340,7 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) dev_err(dev, "irq init: could not request sata interrupt %d, rc=%d\n", irq, rc); rc = -ENOENT; - goto free_sata_int_irqs; + goto err_out; } } @@ -3373,49 +3352,29 @@ static int interrupt_init_v2_hw(struct hisi_hba *hisi_hba) dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n", irq, rc); rc = -ENOENT; - goto free_fatal_int_irqs; + goto err_out; } } for (queue_no = 0; queue_no < hisi_hba->queue_count; queue_no++) { struct hisi_sas_cq *cq = &hisi_hba->cq[queue_no]; - struct tasklet_struct *t = &cq->tasklet; - irq = irq_map[queue_no + 96]; - rc = devm_request_irq(dev, irq, cq_interrupt_v2_hw, 0, - DRV_NAME " cq", cq); + cq->irq_no = irq_map[queue_no + 96]; + rc = devm_request_threaded_irq(dev, cq->irq_no, + cq_interrupt_v2_hw, + cq_thread_v2_hw, IRQF_ONESHOT, + DRV_NAME " cq", cq); if (rc) { dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n", irq, rc); rc = -ENOENT; - goto free_cq_int_irqs; + goto err_out; } - tasklet_init(t, cq_tasklet_v2_hw, (unsigned long)cq); } hisi_hba->cq_nvecs = hisi_hba->queue_count; - return 0; - -free_cq_int_irqs: - for (k = 0; k < queue_no; k++) { - struct hisi_sas_cq *cq = &hisi_hba->cq[k]; - - free_irq(irq_map[k + 96], cq); - tasklet_kill(&cq->tasklet); - } -free_fatal_int_irqs: - for (k = 0; k < fatal_no; k++) - free_irq(irq_map[k + 81], hisi_hba); -free_sata_int_irqs: - for (k = 0; k < phy_no; k++) { - struct hisi_sas_phy *phy = &hisi_hba->phy[k]; - - free_irq(irq_map[k + 72], phy); - } -free_phy_int_irqs: - for (k = 0; k < i; k++) - free_irq(irq_map[k + 1], hisi_hba); +err_out: return rc; } @@ -3471,7 +3430,6 @@ static int soft_reset_v2_hw(struct hisi_hba *hisi_hba) interrupt_disable_v2_hw(hisi_hba); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); - hisi_sas_kill_tasklets(hisi_hba); hisi_sas_stop_phys(hisi_hba); @@ -3544,8 +3502,8 @@ static int write_gpio_v2_hw(struct hisi_hba *hisi_hba, u8 reg_type, return 0; } -static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, - int delay_ms, int timeout_ms) +static void wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms) { struct device *dev = hisi_hba->dev; int entries, entries_old = 0, time; @@ -3559,12 +3517,13 @@ static int wait_cmds_complete_timeout_v2_hw(struct hisi_hba *hisi_hba, msleep(delay_ms); } - if (time >= timeout_ms) - return -ETIMEDOUT; + if (time >= timeout_ms) { + dev_dbg(dev, "Wait commands complete timeout!\n"); + return; + } dev_dbg(dev, "wait commands complete %dms\n", time); - return 0; } static struct device_attribute *host_attrs_v2_hw[] = { @@ -3589,6 +3548,9 @@ static struct scsi_host_template sht_v2_hw = { .eh_target_reset_handler = 
sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = host_attrs_v2_hw, .host_reset = hisi_sas_host_reset, }; @@ -3606,9 +3568,7 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .prep_ssp = prep_ssp_v2_hw, .prep_stp = prep_ata_v2_hw, .prep_abort = prep_abort_v2_hw, - .get_free_slot = get_free_slot_v2_hw, .start_delivery = start_delivery_v2_hw, - .slot_complete = slot_complete_v2_hw, .phys_init = phys_init_v2_hw, .phy_start = start_phy_v2_hw, .phy_disable = disable_phy_v2_hw, @@ -3616,7 +3576,6 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = { .get_events = phy_get_events_v2_hw, .phy_set_linkrate = phy_set_linkrate_v2_hw, .phy_get_max_linkrate = phy_get_max_linkrate_v2_hw, - .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V2_HW, .complete_hdr_size = sizeof(struct hisi_sas_complete_v2_hdr), .soft_reset = soft_reset_v2_hw, .get_phys_state = get_phys_state_v2_hw, @@ -3644,11 +3603,6 @@ static int hisi_sas_v2_probe(struct platform_device *pdev) static int hisi_sas_v2_remove(struct platform_device *pdev) { - struct sas_ha_struct *sha = platform_get_drvdata(pdev); - struct hisi_hba *hisi_hba = sha->lldd_ha; - - hisi_sas_kill_tasklets(hisi_hba); - return hisi_sas_remove(pdev); } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 5f0f6df11adf..a2debe0c8185 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -71,6 +71,7 @@ #define HGC_DQE_ECC_MB_ADDR_OFF 16 #define HGC_DQE_ECC_MB_ADDR_MSK (0xfff << HGC_DQE_ECC_MB_ADDR_OFF) #define CHNL_INT_STATUS 0x148 +#define TAB_DFX 0x14c #define HGC_ITCT_ECC_ADDR 0x150 #define HGC_ITCT_ECC_1B_ADDR_OFF 0 #define HGC_ITCT_ECC_1B_ADDR_MSK (0x3ff << \ @@ -83,6 +84,7 @@ #define AXI_ERR_INFO_MSK (0xff << AXI_ERR_INFO_OFF) #define FIFO_ERR_INFO_OFF 8 #define FIFO_ERR_INFO_MSK (0xff << FIFO_ERR_INFO_OFF) +#define TAB_RD_TYPE 0x15c #define INT_COAL_EN 0x19c #define OQ_INT_COAL_TIME 0x1a0 #define OQ_INT_COAL_CNT 0x1a4 @@ -189,12 +191,30 @@ #define PHY_CFG_PHY_RST_OFF 3 #define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF) #define PROG_PHY_LINK_RATE (PORT_BASE + 0x8) +#define CFG_PROG_PHY_LINK_RATE_OFF 8 +#define CFG_PROG_PHY_LINK_RATE_MSK (0xf << CFG_PROG_PHY_LINK_RATE_OFF) #define PHY_CTRL (PORT_BASE + 0x14) #define PHY_CTRL_RESET_OFF 0 #define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF) #define CMD_HDR_PIR_OFF 8 #define CMD_HDR_PIR_MSK (0x1 << CMD_HDR_PIR_OFF) #define SERDES_CFG (PORT_BASE + 0x1c) +#define CFG_ALOS_CHK_DISABLE_OFF 9 +#define CFG_ALOS_CHK_DISABLE_MSK (0x1 << CFG_ALOS_CHK_DISABLE_OFF) +#define SAS_PHY_BIST_CTRL (PORT_BASE + 0x2c) +#define CFG_BIST_MODE_SEL_OFF 0 +#define CFG_BIST_MODE_SEL_MSK (0xf << CFG_BIST_MODE_SEL_OFF) +#define CFG_LOOP_TEST_MODE_OFF 14 +#define CFG_LOOP_TEST_MODE_MSK (0x3 << CFG_LOOP_TEST_MODE_OFF) +#define CFG_RX_BIST_EN_OFF 16 +#define CFG_RX_BIST_EN_MSK (0x1 << CFG_RX_BIST_EN_OFF) +#define CFG_TX_BIST_EN_OFF 17 +#define CFG_TX_BIST_EN_MSK (0x1 << CFG_TX_BIST_EN_OFF) +#define CFG_BIST_TEST_OFF 18 +#define CFG_BIST_TEST_MSK (0x1 << CFG_BIST_TEST_OFF) +#define SAS_PHY_BIST_CODE (PORT_BASE + 0x30) +#define SAS_PHY_BIST_CODE1 (PORT_BASE + 0x34) +#define SAS_BIST_ERR_CNT (PORT_BASE + 0x38) #define SL_CFG (PORT_BASE + 0x84) #define AIP_LIMIT (PORT_BASE + 0x90) #define SL_CONTROL (PORT_BASE + 0x94) @@ -475,6 +495,13 @@ struct hisi_sas_err_record_v3 { #define BASE_VECTORS_V3_HW 16 #define MIN_AFFINE_VECTORS_V3_HW 
(BASE_VECTORS_V3_HW + 1) +#define CHNL_INT_STS_MSK 0xeeeeeeee +#define CHNL_INT_STS_PHY_MSK 0xe +#define CHNL_INT_STS_INT0_MSK BIT(1) +#define CHNL_INT_STS_INT1_MSK BIT(2) +#define CHNL_INT_STS_INT2_MSK BIT(3) +#define CHNL_WIDTH 4 + enum { DSM_FUNC_ERR_HANDLE_MSI = 0, }; @@ -499,13 +526,6 @@ static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) return readl(regs); } -static u32 hisi_sas_read32_relaxed(struct hisi_hba *hisi_hba, u32 off) -{ - void __iomem *regs = hisi_hba->regs + off; - - return readl_relaxed(regs); -} - static void hisi_sas_write32(struct hisi_hba *hisi_hba, u32 off, u32 val) { void __iomem *regs = hisi_hba->regs + off; @@ -782,13 +802,14 @@ static void setup_itct_v3_hw(struct hisi_hba *hisi_hba, (0x1ULL << ITCT_HDR_RTOLT_OFF)); } -static void clear_itct_v3_hw(struct hisi_hba *hisi_hba, - struct hisi_sas_device *sas_dev) +static int clear_itct_v3_hw(struct hisi_hba *hisi_hba, + struct hisi_sas_device *sas_dev) { DECLARE_COMPLETION_ONSTACK(completion); u64 dev_id = sas_dev->device_id; struct hisi_sas_itct *itct = &hisi_hba->itct[dev_id]; u32 reg_val = hisi_sas_read32(hisi_hba, ENT_INT_SRC3); + struct device *dev = hisi_hba->dev; sas_dev->completion = &completion; @@ -801,8 +822,14 @@ static void clear_itct_v3_hw(struct hisi_hba *hisi_hba, reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK); hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val); - wait_for_completion(sas_dev->completion); + if (!wait_for_completion_timeout(sas_dev->completion, + CLEAR_ITCT_TIMEOUT * HZ)) { + dev_warn(dev, "failed to clear ITCT\n"); + return -ETIMEDOUT; + } + memset(itct, 0, sizeof(struct hisi_sas_itct)); + return 0; } static void dereg_device_v3_hw(struct hisi_hba *hisi_hba, @@ -1006,31 +1033,6 @@ static int get_wideport_bitmap_v3_hw(struct hisi_hba *hisi_hba, int port_id) return bitmap; } -/** - * The callpath to this function and upto writing the write - * queue pointer should be safe from interruption. 
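Both the v2 hunks above and the v3 hunks further down replace the per-queue completion tasklets with threaded interrupts: the hard handler only acks the hardware and returns IRQ_WAKE_THREAD, the former tasklet body becomes the thread function, and devm-managed ownership is what lets the hand-rolled free_*_irqs unwind labels collapse into a single err_out. A minimal sketch of the pattern, with all demo_* names hypothetical rather than taken from this driver:

#include <linux/interrupt.h>
#include <linux/io.h>

#define DEMO_OQ_INT_SRC 0x168          /* hypothetical ack register */

struct demo_cq {                       /* hypothetical stand-in for hisi_sas_cq */
        int id;
        int irq_no;
        void __iomem *regs;
};

/* Hard handler: interrupt context, just silence the source. */
static irqreturn_t demo_cq_irq(int irq, void *p)
{
        struct demo_cq *cq = p;

        writel(BIT(cq->id), cq->regs + DEMO_OQ_INT_SRC);
        return IRQ_WAKE_THREAD;        /* defer the completion walk */
}

/* Thread handler: process context, may sleep. */
static irqreturn_t demo_cq_thread(int irq, void *p)
{
        demo_walk_completions(p);      /* hypothetical helper */
        return IRQ_HANDLED;
}

static int demo_register_cq_irq(struct device *dev, struct demo_cq *cq)
{
        /* IRQF_ONESHOT keeps the line masked until the thread returns;
         * the devm lifetime makes manual free_irq() unwinding on the
         * error paths unnecessary.
         */
        return devm_request_threaded_irq(dev, cq->irq_no, demo_cq_irq,
                                         demo_cq_thread, IRQF_ONESHOT,
                                         "demo cq", cq);
}

Note that the v3 hunks below pick IRQF_SHARED instead of IRQF_ONESHOT when hisi_sas_intr_conv makes all completion queues share one vector.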
- */ -static int -get_free_slot_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_dq *dq) -{ - struct device *dev = hisi_hba->dev; - int queue = dq->id; - u32 r, w; - - w = dq->wr_point; - r = hisi_sas_read32_relaxed(hisi_hba, - DLVRY_Q_0_RD_PTR + (queue * 0x14)); - if (r == (w+1) % HISI_SAS_QUEUE_SLOTS) { - dev_warn(dev, "full queue=%d r=%d w=%d\n", - queue, r, w); - return -EAGAIN; - } - - dq->wr_point = (dq->wr_point + 1) % HISI_SAS_QUEUE_SLOTS; - - return w; -} - static void start_delivery_v3_hw(struct hisi_sas_dq *dq) { struct hisi_hba *hisi_hba = dq->hisi_hba; @@ -1386,7 +1388,10 @@ static void prep_ata_v3_hw(struct hisi_hba *hisi_hba, hdr->dw1 = cpu_to_le32(dw1); /* dw2 */ - if (task->ata_task.use_ncq && hisi_sas_get_ncq_tag(task, &hdr_tag)) { + if (task->ata_task.use_ncq) { + struct ata_queued_cmd *qc = task->uldd_task; + + hdr_tag = qc->tag; task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3); dw2 |= hdr_tag << CMD_HDR_NCQ_TAG_OFF; } @@ -1551,6 +1556,8 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba) u32 phy_state, sl_ctrl, txid_auto; struct device *dev = hisi_hba->dev; + atomic_inc(&phy->down_cnt); + del_timer(&phy->timer); hisi_sas_phy_write32(hisi_hba, phy_no, PHYCTRL_NOT_RDY_MSK, 1); @@ -1819,19 +1826,19 @@ static irqreturn_t int_chnl_int_v3_hw(int irq_no, void *p) int phy_no = 0; irq_msk = hisi_sas_read32(hisi_hba, CHNL_INT_STATUS) - & 0xeeeeeeee; + & CHNL_INT_STS_MSK; while (irq_msk) { - if (irq_msk & (2 << (phy_no * 4))) + if (irq_msk & (CHNL_INT_STS_INT0_MSK << (phy_no * CHNL_WIDTH))) handle_chl_int0_v3_hw(hisi_hba, phy_no); - if (irq_msk & (4 << (phy_no * 4))) + if (irq_msk & (CHNL_INT_STS_INT1_MSK << (phy_no * CHNL_WIDTH))) handle_chl_int1_v3_hw(hisi_hba, phy_no); - if (irq_msk & (8 << (phy_no * 4))) + if (irq_msk & (CHNL_INT_STS_INT2_MSK << (phy_no * CHNL_WIDTH))) handle_chl_int2_v3_hw(hisi_hba, phy_no); - irq_msk &= ~(0xe << (phy_no * 4)); + irq_msk &= ~(CHNL_INT_STS_PHY_MSK << (phy_no * CHNL_WIDTH)); phy_no++; } @@ -1944,7 +1951,7 @@ static void fatal_ecc_int_v3_hw(struct hisi_hba *hisi_hba) u32 irq_value, irq_msk; irq_msk = hisi_sas_read32(hisi_hba, SAS_ECC_INTR_MSK); - hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, irq_msk | 0xffffffff); + hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xffffffff); irq_value = hisi_sas_read32(hisi_hba, SAS_ECC_INTR); if (irq_value) @@ -2220,7 +2227,7 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) slot_err_v3_hw(hisi_hba, task, slot); if (ts->stat != SAS_DATA_UNDERRUN) - dev_info(dev, "erroneous completion iptt=%d task=%p dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", + dev_info(dev, "erroneous completion iptt=%d task=%pK dev id=%d CQ hdr: 0x%x 0x%x 0x%x 0x%x Error info: 0x%x 0x%x 0x%x 0x%x\n", slot->idx, task, sas_dev->device_id, dw0, dw1, complete_hdr->act, dw3, error_info[0], error_info[1], @@ -2241,20 +2248,16 @@ slot_complete_v3_hw(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot) } case SAS_PROTOCOL_SMP: { struct scatterlist *sg_resp = &task->smp_task.smp_resp; - void *to; + void *to = page_address(sg_page(sg_resp)); ts->stat = SAM_STAT_GOOD; - to = kmap_atomic(sg_page(sg_resp)); - dma_unmap_sg(dev, &task->smp_task.smp_resp, 1, - DMA_FROM_DEVICE); dma_unmap_sg(dev, &task->smp_task.smp_req, 1, DMA_TO_DEVICE); memcpy(to + sg_resp->offset, hisi_sas_status_buf_addr_mem(slot) + sizeof(struct hisi_sas_err_record), - sg_dma_len(sg_resp)); - kunmap_atomic(to); + sg_resp->length); break; } case SAS_PROTOCOL_SATA: @@ -2279,7 +2282,7 @@ out: 
spin_lock_irqsave(&task->task_state_lock, flags); if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { spin_unlock_irqrestore(&task->task_state_lock, flags); - dev_info(dev, "slot complete: task(%p) aborted\n", task); + dev_info(dev, "slot complete: task(%pK) aborted\n", task); return SAS_ABORTED_TASK; } task->task_state_flags |= SAS_TASK_STATE_DONE; @@ -2290,7 +2293,7 @@ out: spin_lock_irqsave(&device->done_lock, flags); if (test_bit(SAS_HA_FROZEN, &ha->state)) { spin_unlock_irqrestore(&device->done_lock, flags); - dev_info(dev, "slot complete: task(%p) ignored\n ", + dev_info(dev, "slot complete: task(%pK) ignored\n ", task); return sts; } @@ -2303,9 +2306,9 @@ out: return sts; } -static void cq_tasklet_v3_hw(unsigned long val) +static irqreturn_t cq_thread_v3_hw(int irq_no, void *p) { - struct hisi_sas_cq *cq = (struct hisi_sas_cq *)val; + struct hisi_sas_cq *cq = p; struct hisi_hba *hisi_hba = cq->hisi_hba; struct hisi_sas_slot *slot; struct hisi_sas_complete_v3_hdr *complete_queue; @@ -2342,6 +2345,8 @@ static void cq_tasklet_v3_hw(unsigned long val) /* update rd_point */ cq->rd_point = rd_point; hisi_sas_write32(hisi_hba, COMPL_Q_0_RD_PTR + (0x14 * queue), rd_point); + + return IRQ_HANDLED; } static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) @@ -2352,9 +2357,7 @@ static irqreturn_t cq_interrupt_v3_hw(int irq_no, void *p) hisi_sas_write32(hisi_hba, OQ_INT_SRC, 1 << queue); - tasklet_schedule(&cq->tasklet); - - return IRQ_HANDLED; + return IRQ_WAKE_THREAD; } static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs) @@ -2369,7 +2372,7 @@ static void setup_reply_map_v3_hw(struct hisi_hba *hisi_hba, int nvecs) BASE_VECTORS_V3_HW); if (!mask) goto fallback; - cq->pci_irq_mask = mask; + cq->irq_mask = mask; for_each_cpu(cpu, mask) hisi_hba->reply_map[cpu] = queue; } @@ -2385,8 +2388,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) { struct device *dev = hisi_hba->dev; struct pci_dev *pdev = hisi_hba->pci_dev; - int vectors, rc; - int i, k; + int vectors, rc, i; int max_msi = HISI_SAS_MSI_COUNT_V3_HW, min_msi; if (auto_affine_msi_experimental) { @@ -2394,6 +2396,8 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) .pre_vectors = BASE_VECTORS_V3_HW, }; + dev_info(dev, "Enable MSI auto-affinity\n"); + min_msi = MIN_AFFINE_VECTORS_V3_HW; hisi_hba->reply_map = devm_kcalloc(dev, nr_cpu_ids, @@ -2434,7 +2438,7 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) if (rc) { dev_err(dev, "could not request chnl interrupt, rc=%d\n", rc); rc = -ENOENT; - goto free_phy_irq; + goto free_irq_vectors; } rc = devm_request_irq(dev, pci_irq_vector(pdev, 11), @@ -2443,43 +2447,34 @@ static int interrupt_init_v3_hw(struct hisi_hba *hisi_hba) if (rc) { dev_err(dev, "could not request fatal interrupt, rc=%d\n", rc); rc = -ENOENT; - goto free_chnl_interrupt; + goto free_irq_vectors; } - /* Init tasklets for cq only */ + if (hisi_sas_intr_conv) + dev_info(dev, "Enable interrupt converge\n"); + for (i = 0; i < hisi_hba->cq_nvecs; i++) { struct hisi_sas_cq *cq = &hisi_hba->cq[i]; - struct tasklet_struct *t = &cq->tasklet; int nr = hisi_sas_intr_conv ? 16 : 16 + i; - unsigned long irqflags = hisi_sas_intr_conv ? IRQF_SHARED : 0; - - rc = devm_request_irq(dev, pci_irq_vector(pdev, nr), - cq_interrupt_v3_hw, irqflags, + unsigned long irqflags = hisi_sas_intr_conv ? 
IRQF_SHARED : + IRQF_ONESHOT; + + cq->irq_no = pci_irq_vector(pdev, nr); + rc = devm_request_threaded_irq(dev, cq->irq_no, + cq_interrupt_v3_hw, + cq_thread_v3_hw, + irqflags, DRV_NAME " cq", cq); if (rc) { dev_err(dev, "could not request cq%d interrupt, rc=%d\n", i, rc); rc = -ENOENT; - goto free_cq_irqs; + goto free_irq_vectors; } - - tasklet_init(t, cq_tasklet_v3_hw, (unsigned long)cq); } return 0; -free_cq_irqs: - for (k = 0; k < i; k++) { - struct hisi_sas_cq *cq = &hisi_hba->cq[k]; - int nr = hisi_sas_intr_conv ? 16 : 16 + k; - - free_irq(pci_irq_vector(pdev, nr), cq); - } - free_irq(pci_irq_vector(pdev, 11), hisi_hba); -free_chnl_interrupt: - free_irq(pci_irq_vector(pdev, 2), hisi_hba); -free_phy_irq: - free_irq(pci_irq_vector(pdev, 1), hisi_hba); free_irq_vectors: pci_free_irq_vectors(pdev); return rc; @@ -2551,7 +2546,6 @@ static int disable_host_v3_hw(struct hisi_hba *hisi_hba) interrupt_disable_v3_hw(hisi_hba); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0x0); - hisi_sas_kill_tasklets(hisi_hba); hisi_sas_stop_phys(hisi_hba); @@ -2620,8 +2614,8 @@ static int write_gpio_v3_hw(struct hisi_hba *hisi_hba, u8 reg_type, return 0; } -static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, - int delay_ms, int timeout_ms) +static void wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, + int delay_ms, int timeout_ms) { struct device *dev = hisi_hba->dev; int entries, entries_old = 0, time; @@ -2635,12 +2629,12 @@ static int wait_cmds_complete_timeout_v3_hw(struct hisi_hba *hisi_hba, msleep(delay_ms); } - if (time >= timeout_ms) - return -ETIMEDOUT; + if (time >= timeout_ms) { + dev_dbg(dev, "Wait commands complete timeout!\n"); + return; + } dev_dbg(dev, "wait commands complete %dms\n", time); - - return 0; } static ssize_t intr_conv_v3_hw_show(struct device *dev, @@ -2887,18 +2881,47 @@ static const struct hisi_sas_debugfs_reg debugfs_global_reg = { .read_global_reg = hisi_sas_read32, }; +static const struct hisi_sas_debugfs_reg_lu debugfs_axi_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(AM_CFG_MAX_TRANS), + HISI_SAS_DEBUGFS_REG(AM_CFG_SINGLE_PORT_MAX_TRANS), + HISI_SAS_DEBUGFS_REG(AXI_CFG), + HISI_SAS_DEBUGFS_REG(AM_ROB_ECC_ERR_ADDR), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_axi_reg = { + .lu = debugfs_axi_reg_lu, + .count = 0x61, + .base_off = AXI_MASTER_CFG_BASE, + .read_global_reg = hisi_sas_read32, +}; + +static const struct hisi_sas_debugfs_reg_lu debugfs_ras_reg_lu[] = { + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR0_MASK), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR1_MASK), + HISI_SAS_DEBUGFS_REG(CFG_SAS_RAS_INTR_MASK), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2), + HISI_SAS_DEBUGFS_REG(SAS_RAS_INTR2_MASK), + {} +}; + +static const struct hisi_sas_debugfs_reg debugfs_ras_reg = { + .lu = debugfs_ras_reg_lu, + .count = 0x10, + .base_off = RAS_BASE, + .read_global_reg = hisi_sas_read32, +}; + static void debugfs_snapshot_prepare_v3_hw(struct hisi_hba *hisi_hba) { - struct device *dev = hisi_hba->dev; - set_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE, 0); - if (wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000) == -ETIMEDOUT) - dev_dbg(dev, "Wait commands complete timeout!\n"); + wait_cmds_complete_timeout_v3_hw(hisi_hba, 100, 5000); - hisi_sas_kill_tasklets(hisi_hba); + hisi_sas_sync_irqs(hisi_hba); } static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) @@ -2909,6 +2932,142 @@ static void debugfs_snapshot_restore_v3_hw(struct hisi_hba *hisi_hba) 
clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); } +static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba, + enum hisi_sas_debugfs_cache_type type, + u32 *cache) +{ + u32 cache_dw_size = HISI_SAS_IOST_ITCT_CACHE_DW_SZ * + HISI_SAS_IOST_ITCT_CACHE_NUM; + u32 *buf = cache; + u32 i, val; + + hisi_sas_write32(hisi_hba, TAB_RD_TYPE, type); + + for (i = 0; i < HISI_SAS_IOST_ITCT_CACHE_DW_SZ; i++) { + val = hisi_sas_read32(hisi_hba, TAB_DFX); + if (val == 0xffffffff) + break; + } + + if (val != 0xffffffff) { + pr_err("Issue occur when reading IOST/ITCT cache!\n"); + return; + } + + memset(buf, 0, cache_dw_size * 4); + buf[0] = val; + + for (i = 1; i < cache_dw_size; i++) + buf[i] = hisi_sas_read32(hisi_hba, TAB_DFX); +} + +static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 reg_val; + int phy_id = hisi_hba->debugfs_bist_phy_no; + + /* disable PHY */ + hisi_sas_phy_enable(hisi_hba, phy_id, 0); + + /* disable ALOS */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG); + reg_val |= CFG_ALOS_CHK_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val); +} + +static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba) +{ + u32 reg_val; + int phy_id = hisi_hba->debugfs_bist_phy_no; + + /* disable loopback */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL); + reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK | + CFG_BIST_TEST_MSK); + hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val); + + /* enable ALOS */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG); + reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK; + hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val); + + /* restore the linkrate */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE); + /* init OOB link rate as 1.5 Gbits */ + reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK; + reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF); + hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val); + + /* enable PHY */ + hisi_sas_phy_enable(hisi_hba, phy_id, 1); +} + +#define SAS_PHY_BIST_CODE_INIT 0x1 +#define SAS_PHY_BIST_CODE1_INIT 0X80 +static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable) +{ + u32 reg_val, mode_tmp; + u32 linkrate = hisi_hba->debugfs_bist_linkrate; + u32 phy_id = hisi_hba->debugfs_bist_phy_no; + u32 code_mode = hisi_hba->debugfs_bist_code_mode; + u32 path_mode = hisi_hba->debugfs_bist_mode; + struct device *dev = hisi_hba->dev; + + dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n", + linkrate, phy_id, code_mode, path_mode); + mode_tmp = path_mode ? 
2 : 1; + if (enable) { + /* some preparations before bist test */ + hisi_sas_bist_test_prep_v3_hw(hisi_hba); + + /* set linkrate of bit test*/ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, + PROG_PHY_LINK_RATE); + reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK; + reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF); + hisi_sas_phy_write32(hisi_hba, phy_id, + PROG_PHY_LINK_RATE, reg_val); + + /* set code mode of bit test */ + reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, + SAS_PHY_BIST_CTRL); + reg_val &= ~(CFG_BIST_MODE_SEL_MSK | + CFG_LOOP_TEST_MODE_MSK | + CFG_RX_BIST_EN_MSK | + CFG_TX_BIST_EN_MSK | + CFG_BIST_TEST_MSK); + reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) | + (mode_tmp << CFG_LOOP_TEST_MODE_OFF) | + CFG_BIST_TEST_MSK); + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CTRL, reg_val); + + /* set the bist init value */ + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CODE, + SAS_PHY_BIST_CODE_INIT); + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CODE1, + SAS_PHY_BIST_CODE1_INIT); + + mdelay(100); + reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK); + hisi_sas_phy_write32(hisi_hba, phy_id, + SAS_PHY_BIST_CTRL, reg_val); + + /* clear error bit */ + mdelay(100); + hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT); + } else { + /* disable bist test and recover it */ + hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba, + phy_id, SAS_BIST_ERR_CNT); + hisi_sas_bist_test_restore_v3_hw(hisi_hba); + } + + return 0; +} + static struct scsi_host_template sht_v3_hw = { .name = DRV_NAME, .module = THIS_MODULE, @@ -2927,6 +3086,9 @@ static struct scsi_host_template sht_v3_hw = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = host_attrs_v3_hw, .tag_alloc_policy = BLK_TAG_ALLOC_RR, .host_reset = hisi_sas_host_reset, @@ -2935,7 +3097,6 @@ static struct scsi_host_template sht_v3_hw = { static const struct hisi_sas_hw hisi_sas_v3_hw = { .hw_init = hisi_sas_v3_init, .setup_itct = setup_itct_v3_hw, - .max_command_entries = HISI_SAS_COMMAND_ENTRIES_V3_HW, .get_wideport_bitmap = get_wideport_bitmap_v3_hw, .complete_hdr_size = sizeof(struct hisi_sas_complete_v3_hdr), .clear_itct = clear_itct_v3_hw, @@ -2944,9 +3105,7 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .prep_smp = prep_smp_v3_hw, .prep_stp = prep_ata_v3_hw, .prep_abort = prep_abort_v3_hw, - .get_free_slot = get_free_slot_v3_hw, .start_delivery = start_delivery_v3_hw, - .slot_complete = slot_complete_v3_hw, .phys_init = phys_init_v3_hw, .phy_start = start_phy_v3_hw, .phy_disable = disable_phy_v3_hw, @@ -2959,10 +3118,14 @@ static const struct hisi_sas_hw hisi_sas_v3_hw = { .get_events = phy_get_events_v3_hw, .write_gpio = write_gpio_v3_hw, .wait_cmds_complete_timeout = wait_cmds_complete_timeout_v3_hw, - .debugfs_reg_global = &debugfs_global_reg, + .debugfs_reg_array[DEBUGFS_GLOBAL] = &debugfs_global_reg, + .debugfs_reg_array[DEBUGFS_AXI] = &debugfs_axi_reg, + .debugfs_reg_array[DEBUGFS_RAS] = &debugfs_ras_reg, .debugfs_reg_port = &debugfs_port_reg, .snapshot_prepare = debugfs_snapshot_prepare_v3_hw, .snapshot_restore = debugfs_snapshot_restore_v3_hw, + .read_iost_itct_cache = read_iost_itct_cache_v3_hw, + .set_bist = debugfs_set_bist_v3_hw, }; static struct Scsi_Host * @@ -2993,8 +3156,6 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) else hisi_hba->prot_mask = prot_mask; - timer_setup(&hisi_hba->timer, NULL, 0); - if (hisi_sas_get_fw_info(hisi_hba) < 0) 
goto err_out; @@ -3076,17 +3237,14 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) shost->max_lun = ~0; shost->max_channel = 1; shost->max_cmd_len = 16; - shost->can_queue = hisi_hba->hw->max_command_entries - - HISI_SAS_RESERVED_IPTT_CNT; - shost->cmd_per_lun = hisi_hba->hw->max_command_entries - - HISI_SAS_RESERVED_IPTT_CNT; + shost->can_queue = HISI_SAS_UNRESERVED_IPTT; + shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT; sha->sas_ha_name = DRV_NAME; sha->dev = dev; sha->lldd_module = THIS_MODULE; sha->sas_addr = &hisi_hba->sas_addr[0]; sha->num_phys = hisi_hba->n_phy; - sha->core.shost = hisi_hba->shost; for (i = 0; i < hisi_hba->n_phy; i++) { sha->sas_phy[i] = &hisi_hba->phy[i].sas_phy; @@ -3124,6 +3282,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_out_register_ha: scsi_remove_host(shost); err_out_ha: + hisi_sas_debugfs_exit(hisi_hba); scsi_host_put(shost); err_out_regions: pci_release_regions(pdev); @@ -3157,8 +3316,6 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) struct hisi_hba *hisi_hba = sha->lldd_ha; struct Scsi_Host *shost = sha->core.shost; - hisi_sas_debugfs_exit(hisi_hba); - if (timer_pending(&hisi_hba->timer)) del_timer(&hisi_hba->timer); @@ -3166,10 +3323,10 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev) sas_remove_host(sha->core.shost); hisi_sas_v3_destroy_irqs(pdev, hisi_hba); - hisi_sas_kill_tasklets(hisi_hba); pci_release_regions(pdev); pci_disable_device(pdev); hisi_sas_free(hisi_hba); + hisi_sas_debugfs_exit(hisi_hba); scsi_host_put(shost); } @@ -3273,15 +3430,22 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D0, 0); pci_restore_state(pdev); rc = pci_enable_device(pdev); - if (rc) + if (rc) { dev_err(dev, "enable device failed during resume (%d)\n", rc); + return rc; + } pci_set_master(pdev); scsi_unblock_requests(shost); clear_bit(HISI_SAS_REJECT_CMD_BIT, &hisi_hba->flags); sas_prep_resume_ha(sha); - init_reg_v3_hw(hisi_hba); + rc = hw_init_v3_hw(hisi_hba); + if (rc) { + scsi_remove_host(shost); + pci_disable_device(pdev); + return rc; + } hisi_hba->hw->phys_init(hisi_hba); sas_resume_ha(sha); clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags); diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 55522b7162d3..1d669e47b692 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -38,6 +38,7 @@ #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport.h> +#include <scsi/scsi_cmnd.h> #include "scsi_priv.h" #include "scsi_logging.h" @@ -554,13 +555,29 @@ struct Scsi_Host *scsi_host_get(struct Scsi_Host *shost) } EXPORT_SYMBOL(scsi_host_get); +static bool scsi_host_check_in_flight(struct request *rq, void *data, + bool reserved) +{ + int *count = data; + struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); + + if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state)) + (*count)++; + + return true; +} + /** * scsi_host_busy - Return the host busy counter * @shost: Pointer to Scsi_Host to inc. 
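The hosts.c change here rebuilds scsi_host_busy() on top of blk_mq_tagset_busy_iter() plus a per-command SCMD_STATE_INFLIGHT bit, rather than a host-wide atomic counter that every submission and completion path had to touch. The iterator callback's boolean return value also supports early exit, so a cheaper "is anything in flight?" probe can stop at the first hit; a hypothetical variant:

#include <linux/blk-mq.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Hypothetical: bail out at the first in-flight command instead of counting. */
static bool demo_any_inflight(struct request *rq, void *data, bool reserved)
{
        struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
        bool *found = data;

        if (test_bit(SCMD_STATE_INFLIGHT, &cmd->state)) {
                *found = true;
                return false;   /* returning false stops the iteration */
        }
        return true;            /* keep walking the tag set */
}

static bool demo_host_is_busy(struct Scsi_Host *shost)
{
        bool found = false;

        blk_mq_tagset_busy_iter(&shost->tag_set, demo_any_inflight, &found);
        return found;
}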
**/ int scsi_host_busy(struct Scsi_Host *shost) { - return atomic_read(&shost->host_busy); + int cnt = 0; + + blk_mq_tagset_busy_iter(&shost->tag_set, + scsi_host_check_in_flight, &cnt); + return cnt; } EXPORT_SYMBOL(scsi_host_busy); diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 1bb6aada93fa..1a4ddfacb458 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -21,7 +21,6 @@ #include <linux/interrupt.h> #include <linux/types.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/delay.h> @@ -5478,6 +5477,8 @@ static int hpsa_ciss_submit(struct ctlr_info *h, return SCSI_MLQUEUE_HOST_BUSY; } + c->device = dev; + enqueue_cmd_and_start_io(h, c); /* the cmd'll come back via intr handler in complete_scsi_command() */ return 0; @@ -5549,6 +5550,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h, hpsa_cmd_init(h, c->cmdindex, c); c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; + c->device = dev; rc = hpsa_scsi_ioaccel_raid_map(h, c); if (rc < 0) /* scsi_dma_map failed. */ rc = SCSI_MLQUEUE_HOST_BUSY; @@ -5556,6 +5558,7 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h, hpsa_cmd_init(h, c->cmdindex, c); c->cmd_type = CMD_SCSI; c->scsi_cmd = cmd; + c->device = dev; rc = hpsa_scsi_ioaccel_direct_map(h, c); if (rc < 0) /* scsi_dma_map failed. */ rc = SCSI_MLQUEUE_HOST_BUSY; @@ -6873,7 +6876,7 @@ static void __iomem *remap_pci_mem(ulong base, ulong size) { ulong page_base = ((ulong) base) & PAGE_MASK; ulong page_offs = ((ulong) base) - page_base; - void __iomem *page_remapped = ioremap_nocache(page_base, + void __iomem *page_remapped = ioremap(page_base, page_offs + size); return page_remapped ? (page_remapped + page_offs) : NULL; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 8cdbac076a1b..df897df5cafe 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1830,6 +1830,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job) port_id = (bsg_request->rqst_data.h_els.port_id[0] << 16) | (bsg_request->rqst_data.h_els.port_id[1] << 8) | bsg_request->rqst_data.h_els.port_id[2]; + /* fall through */ case FC_BSG_RPT_ELS: fc_flags = IBMVFC_FC_ELS; break; @@ -1838,6 +1839,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job) port_id = (bsg_request->rqst_data.h_ct.port_id[0] << 16) | (bsg_request->rqst_data.h_ct.port_id[1] << 8) | bsg_request->rqst_data.h_ct.port_id[2]; + /* fall through */ case FC_BSG_RPT_CT: fc_flags = IBMVFC_FC_CT_IU; break; @@ -4020,6 +4022,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt) return; case IBMVFC_MAD_CRQ_ERROR: ibmvfc_retry_host_init(vhost); + /* fall through */ case IBMVFC_MAD_DRIVER_FAILED: ibmvfc_free_event(evt); return; diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 7f9535392a93..d9e94e81da01 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -1581,6 +1581,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi, case H_PERMISSION: if (connection_broken(vscsi)) flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED); + /* Fall through */ default: dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n", rc); @@ -1876,7 +1877,6 @@ static void ibmvscsis_send_messages(struct scsi_info *vscsi) */ struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi; struct ibmvscsis_cmd *cmd, *nxt; - struct iu_entry *iue; long rc = ADAPT_SUCCESS; bool retry = false; @@ -1930,8 +1930,6 @@ static void 
ibmvscsis_send_messages(struct scsi_info *vscsi) */ vscsi->credit += 1; } else { - iue = cmd->iue; - crq->valid = VALID_CMD_RESP_EL; crq->format = cmd->rsp.format; @@ -2353,7 +2351,6 @@ static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi, { struct iu_entry *iue = cmd->iue; struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout; - long rc = ADAPT_SUCCESS; if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) || !list_empty(&vscsi->waiting_rsp)) { @@ -2369,7 +2366,7 @@ static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi, ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0); } - return rc; + return ADAPT_SUCCESS; } /* Called with intr lock held */ @@ -2492,8 +2489,10 @@ static long ibmvscsis_ping_response(struct scsi_info *vscsi) break; case H_CLOSED: vscsi->flags |= CLIENT_FAILED; + /* Fall through */ case H_DROPPED: vscsi->flags |= RESPONSE_Q_DOWN; + /* Fall through */ case H_REMOTE_PARM: dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n", rc); @@ -3794,7 +3793,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) se_cmd); struct iu_entry *iue = cmd->iue; struct scsi_info *vscsi = cmd->adapter; - char *sd; uint len = 0; int rc; @@ -3802,7 +3800,6 @@ static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd) 1); if (rc) { dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc); - sd = se_cmd->sense_buffer; se_cmd->scsi_sense_length = 18; memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length); /* Logical Unit Communication Time-out asc/ascq = 0x0801 */ diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c index 41fd64c9c8e9..1d39628ac947 100644 --- a/drivers/scsi/initio.c +++ b/drivers/scsi/initio.c @@ -1640,7 +1640,7 @@ static int initio_state_6(struct initio_host * host) * */ -int initio_state_7(struct initio_host * host) +static int initio_state_7(struct initio_host * host) { int cnt, i; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 079c04bc448a..ae45cbe98ae2 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -6727,6 +6727,9 @@ static struct scsi_host_template driver_template = { .name = "IPR", .info = ipr_ioa_info, .ioctl = ipr_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = ipr_ioctl, +#endif .queuecommand = ipr_queuecommand, .eh_abort_handler = ipr_eh_abort, .eh_device_reset_handler = ipr_eh_dev_reset, diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c index e8bc8d328bab..f25672982c5f 100644 --- a/drivers/scsi/ips.c +++ b/drivers/scsi/ips.c @@ -498,7 +498,7 @@ ips_setup(char *ips_str) int i; char *key; char *value; - IPS_OPTION options[] = { + static const IPS_OPTION options[] = { {"noi2o", &ips_force_i2o, 0}, {"nommap", &ips_force_memio, 0}, {"ioctlsize", &ips_ioctlsize, IPS_IOCTL_SIZE}, diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c index 1727d0c71b12..b48aac8dfcb8 100644 --- a/drivers/scsi/isci/init.c +++ b/drivers/scsi/isci/init.c @@ -168,6 +168,9 @@ static struct scsi_host_template isci_sht = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = isci_host_attrs, .track_queue_depth = 1, }; diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index 9e8de1462593..b1c197505579 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -147,7 +147,7 @@ static struct isci_port *sci_port_configuration_agent_find_port( /** * * @controller: This is the controller object that contains the port agent - * @port_agent: 
This is the port configruation agent for the controller. + * @port_agent: This is the port configuration agent for the controller. * * This routine will validate the port configuration is correct for the SCU * hardware. The SCU hardware allows for port configurations as follows. LP0 diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c index 49aa4e657c44..cd1e4b4d95bb 100644 --- a/drivers/scsi/isci/remote_device.c +++ b/drivers/scsi/isci/remote_device.c @@ -1504,7 +1504,7 @@ static enum sci_status isci_remote_device_construct(struct isci_port *iport, * This function builds the isci_remote_device when a libsas dev_found message * is received. * @isci_host: This parameter specifies the isci host object. - * @port: This parameter specifies the isci_port conected to this device. + * @port: This parameter specifies the isci_port connected to this device. * * pointer to new isci_remote_device. */ diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 7bedbe877704..b5dd1caae5e9 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -369,8 +369,16 @@ static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; unsigned int noreclaim_flag; + struct iscsi_tcp_conn *tcp_conn = conn->dd_data; + struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; int rc = 0; + if (!tcp_sw_conn->sock) { + iscsi_conn_printk(KERN_ERR, conn, + "Transport not bound to socket!\n"); + return -EINVAL; + } + noreclaim_flag = memalloc_noreclaim_save(); while (iscsi_sw_tcp_xmit_qlen(conn)) { @@ -879,6 +887,10 @@ free_host: static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct iscsi_session *session = cls_session->dd_data; + + if (WARN_ON_ONCE(session->leadconn)) + return; iscsi_tcp_r2tpool_free(cls_session->dd_data); iscsi_session_teardown(cls_session); diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c index abac2f350aee..c48a73a0f517 100644 --- a/drivers/scsi/lasi700.c +++ b/drivers/scsi/lasi700.c @@ -98,7 +98,7 @@ lasi700_probe(struct parisc_device *dev) hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); - hostdata->base = ioremap_nocache(base, 0x100); + hostdata->base = ioremap(base, 0x100); hostdata->differential = 0; if (dev->id.sversion == LASI_700_SVERSION) { diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index ebd47c0cf9e9..70b99c0e2e67 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1945,7 +1945,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc); - spin_lock(&session->frwd_lock); + spin_lock_bh(&session->frwd_lock); task = (struct iscsi_task *)sc->SCp.ptr; if (!task) { /* @@ -2072,7 +2072,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc) done: if (task) task->last_timeout = jiffies; - spin_unlock(&session->frwd_lock); + spin_unlock_bh(&session->frwd_lock); ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ? 
"timer reset" : "shutdown or nh"); return rc; diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index e9e00740f7ca..c5a828a041e0 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -137,7 +137,7 @@ static void sas_ata_task_done(struct sas_task *task) } else { ac = sas_to_ata_err(stat); if (ac) { - pr_warn("%s: SAS error %x\n", __func__, stat->stat); + pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat); /* We saw a SAS error. Send a vague error. */ if (!link->sactive) { qc->err_mask = ac; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index abcad097ff2f..daf951b0b3f5 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -81,12 +81,21 @@ static int sas_get_port_device(struct asd_sas_port *port) else dev->dev_type = SAS_SATA_DEV; dev->tproto = SAS_PROTOCOL_SATA; - } else { + } else if (port->oob_mode == SAS_OOB_MODE) { struct sas_identify_frame *id = (struct sas_identify_frame *) dev->frame_rcvd; dev->dev_type = id->dev_type; dev->iproto = id->initiator_bits; dev->tproto = id->target_bits; + } else { + /* If the oob mode is OOB_NOT_CONNECTED, the port is + * disconnected due to race with PHY down. We cannot + * continue to discover this port + */ + sas_put_device(dev); + pr_warn("Port %016llx is disconnected when discovering\n", + SAS_ADDR(port->attached_sas_addr)); + return -ENODEV; } sas_init_dev(dev); @@ -170,7 +179,7 @@ int sas_notify_lldd_dev_found(struct domain_device *dev) res = i->dft->lldd_dev_found(dev); if (res) { - pr_warn("driver on host %s cannot handle device %llx, error:%d\n", + pr_warn("driver on host %s cannot handle device %016llx, error:%d\n", dev_name(sas_ha->dev), SAS_ADDR(dev->sas_addr), res); } @@ -459,6 +468,7 @@ static void sas_discover_domain(struct work_struct *work) pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); /* Fall through */ #endif + /* Fall through - only for the #else condition above. */ default: error = -ENXIO; pr_err("unhandled device %d\n", dev->dev_type); diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 9fdb9c9fbda4..ab671cdd4cfb 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -500,7 +500,7 @@ static int sas_ex_general(struct domain_device *dev) ex_assign_report_general(dev, rg_resp); if (dev->ex_dev.configuring) { - pr_debug("RG: ex %llx self-configuring...\n", + pr_debug("RG: ex %016llx self-configuring...\n", SAS_ADDR(dev->sas_addr)); schedule_timeout_interruptible(5*HZ); } else @@ -881,7 +881,7 @@ static struct domain_device *sas_ex_discover_end_dev( res = sas_discover_end_dev(child); if (res) { - pr_notice("sas_discover_end_dev() for device %16llx at %016llx:%02d returned 0x%x\n", + pr_notice("sas_discover_end_dev() for device %016llx at %016llx:%02d returned 0x%x\n", SAS_ADDR(child->sas_addr), SAS_ADDR(parent->sas_addr), phy_id, res); goto out_list_del; diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 01f1738ce6df..1f1d01901978 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -107,7 +107,7 @@ static inline void sas_smp_host_handler(struct bsg_job *job, static inline void sas_fail_probe(struct domain_device *dev, const char *func, int err) { - pr_warn("%s: for %s device %16llx returned %d\n", + pr_warn("%s: for %s device %016llx returned %d\n", func, dev->parent ? 
"exp-attached" : "direct-attached", SAS_ADDR(dev->sas_addr), err); diff --git a/drivers/scsi/libsas/sas_port.c b/drivers/scsi/libsas/sas_port.c index 7c86fd248129..19cf418928fa 100644 --- a/drivers/scsi/libsas/sas_port.c +++ b/drivers/scsi/libsas/sas_port.c @@ -165,7 +165,7 @@ static void sas_form_port(struct asd_sas_phy *phy) } sas_port_add_phy(port->port, phy->phy); - pr_debug("%s added to %s, phy_mask:0x%x (%16llx)\n", + pr_debug("%s added to %s, phy_mask:0x%x (%016llx)\n", dev_name(&phy->phy->dev), dev_name(&port->port->dev), port->phy_mask, SAS_ADDR(port->attached_sas_addr)); diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index bec83eb8ab87..9e0975e55c27 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -330,7 +330,7 @@ static int sas_recover_lu(struct domain_device *dev, struct scsi_cmnd *cmd) int_to_scsilun(cmd->device->lun, &lun); - pr_notice("eh: device %llx LUN %llx has the task\n", + pr_notice("eh: device %016llx LUN 0x%llx has the task\n", SAS_ADDR(dev->sas_addr), cmd->device->lun); @@ -615,7 +615,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * reset: tmf_resp = sas_recover_lu(task->dev, cmd); if (tmf_resp == TMF_RESP_FUNC_COMPLETE) { - pr_notice("dev %016llx LU %llx is recovered\n", + pr_notice("dev %016llx LU 0x%llx is recovered\n", SAS_ADDR(task->dev), cmd->device->lun); sas_eh_finish_cmd(cmd); @@ -666,7 +666,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head * * of effort could recover from errors. Quite * possibly the HA just disappeared. */ - pr_err("error from device %llx, LUN %llx couldn't be recovered in any way\n", + pr_err("error from device %016llx, LUN 0x%llx couldn't be recovered in any way\n", SAS_ADDR(task->dev->sas_addr), cmd->device->lun); @@ -851,7 +851,7 @@ int sas_slave_configure(struct scsi_device *scsi_dev) if (scsi_dev->tagged_supported) { scsi_change_queue_depth(scsi_dev, SAS_DEF_QD); } else { - pr_notice("device %llx, LUN %llx doesn't support TCQ\n", + pr_notice("device %016llx, LUN 0x%llx doesn't support TCQ\n", SAS_ADDR(dev->sas_addr), scsi_dev->lun); scsi_change_queue_depth(scsi_dev, 1); } diff --git a/drivers/scsi/libsas/sas_task.c b/drivers/scsi/libsas/sas_task.c index 1ded7d85027e..e2d42593ce52 100644 --- a/drivers/scsi/libsas/sas_task.c +++ b/drivers/scsi/libsas/sas_task.c @@ -27,7 +27,7 @@ void sas_ssp_task_response(struct device *dev, struct sas_task *task, memcpy(tstat->buf, iu->sense_data, tstat->buf_valid_size); if (iu->status != SAM_STAT_CHECK_CONDITION) - dev_warn(dev, "dev %llx sent sense data, but stat(%x) is not CHECK CONDITION\n", + dev_warn(dev, "dev %016llx sent sense data, but stat(0x%x) is not CHECK CONDITION\n", SAS_ADDR(task->dev->sas_addr), iu->status); } else diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index 2c3bb8a966e5..04d73e2be373 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -51,6 +51,8 @@ struct lpfc_sli2_slim; cmnd for menlo needs nearly twice as for firmware downloads using bsg */ +#define LPFC_DEFAULT_XPSGL_SIZE 256 +#define LPFC_MAX_SG_TABLESIZE 0xffff #define LPFC_MIN_SG_SLI4_BUF_SZ 0x800 /* based on LPFC_DEFAULT_SG_SEG_CNT */ #define LPFC_MAX_BG_SLI4_SEG_CNT_DIF 128 /* sg element count for BlockGuard */ #define LPFC_MAX_SG_SEG_CNT_DIF 512 /* sg element count per scsi cmnd */ @@ -603,6 +605,12 @@ struct lpfc_epd_pool { spinlock_t lock; /* lock for expedite pool */ }; +enum ras_state { + INACTIVE, + REG_INPROGRESS, + 
ACTIVE +}; + struct lpfc_ras_fwlog { uint8_t *fwlog_buff; uint32_t fw_buffcount; /* Buffer size posted to FW */ @@ -619,7 +627,7 @@ struct lpfc_ras_fwlog { bool ras_enabled; /* Ras Enabled for the function */ #define LPFC_RAS_DISABLE_LOGGING 0x00 #define LPFC_RAS_ENABLE_LOGGING 0x01 - bool ras_active; /* RAS logging running state */ + enum ras_state state; /* RAS logging running state */ }; struct lpfc_hba { @@ -723,6 +731,7 @@ struct lpfc_hba { #define HBA_FCOE_MODE 0x4 /* HBA function in FCoE Mode */ #define HBA_SP_QUEUE_EVT 0x8 /* Slow-path qevt posted to worker thread*/ #define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */ +#define HBA_PERSISTENT_TOPO 0x20 /* Persistent topology support in hba */ #define ELS_XRI_ABORT_EVENT 0x40 #define ASYNC_EVENT 0x80 #define LINK_DISABLED 0x100 /* Link disabled by user */ @@ -732,14 +741,13 @@ struct lpfc_hba { #define HBA_AER_ENABLED 0x1000 /* AER enabled with HBA */ #define HBA_DEVLOSS_TMO 0x2000 /* HBA in devloss timeout */ #define HBA_RRQ_ACTIVE 0x4000 /* process the rrq active list */ -#define HBA_FCP_IOQ_FLUSH 0x8000 /* FCP I/O queues being flushed */ +#define HBA_IOQ_FLUSH 0x8000 /* FCP/NVME I/O queues being flushed */ #define HBA_FW_DUMP_OP 0x10000 /* Skips fn reset before FW dump */ #define HBA_RECOVERABLE_UE 0x20000 /* Firmware supports recoverable UE */ #define HBA_FORCED_LINK_SPEED 0x40000 /* * Firmware supports Forced Link Speed * capability */ -#define HBA_NVME_IOQ_FLUSH 0x80000 /* NVME IO queues flushed. */ #define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */ uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/ @@ -795,10 +803,12 @@ struct lpfc_hba { uint8_t mds_diags_support; uint8_t bbcredit_support; uint8_t enab_exp_wqcq_pages; + u8 nsler; /* Firmware supports FC-NVMe-2 SLER */ /* HBA Config Parameters */ uint32_t cfg_ack0; uint32_t cfg_xri_rebalancing; + uint32_t cfg_xpsgl; uint32_t cfg_enable_npiv; uint32_t cfg_enable_rrq; uint32_t cfg_topology; @@ -824,8 +834,10 @@ struct lpfc_hba { uint32_t cfg_cq_poll_threshold; uint32_t cfg_cq_max_proc_limit; uint32_t cfg_fcp_cpu_map; + uint32_t cfg_fcp_mq_threshold; uint32_t cfg_hdw_queue; uint32_t cfg_irq_chann; + uint32_t cfg_irq_numa; uint32_t cfg_suppress_rsp; uint32_t cfg_nvme_oas; uint32_t cfg_nvme_embed_cmd; @@ -868,7 +880,6 @@ struct lpfc_hba { uint32_t cfg_aer_support; uint32_t cfg_sriov_nr_virtfn; uint32_t cfg_request_firmware_upgrade; - uint32_t cfg_iocb_cnt; uint32_t cfg_suppress_link_up; uint32_t cfg_rrq_xri_bitmap_sz; uint32_t cfg_delay_discovery; @@ -904,6 +915,7 @@ struct lpfc_hba { wait_queue_head_t work_waitq; struct task_struct *worker_thread; unsigned long data_flags; + uint32_t border_sge_num; uint32_t hbq_in_use; /* HBQs in use flag */ uint32_t hbq_count; /* Count of configured HBQs */ @@ -985,7 +997,7 @@ struct lpfc_hba { struct dma_pool *lpfc_drb_pool; /* data receive buffer pool */ struct dma_pool *lpfc_nvmet_drb_pool; /* data receive buffer pool */ struct dma_pool *lpfc_hbq_pool; /* SLI3 hbq buffer pool */ - struct dma_pool *txrdy_payload_pool; + struct dma_pool *lpfc_cmd_rsp_buf_pool; struct lpfc_dma_pool lpfc_mbuf_safety_pool; mempool_t *mbox_mem_pool; @@ -1033,8 +1045,6 @@ struct lpfc_hba { struct dentry *debug_hbqinfo; struct dentry *debug_dumpHostSlim; struct dentry *debug_dumpHBASlim; - struct dentry *debug_dumpData; /* BlockGuard BPL */ - struct dentry *debug_dumpDif; /* BlockGuard BPL */ struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ struct dentry *debug_InjErrNPortID; /* NPortID to inject errors at */ 
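The three-valued ras_state above replaces the old ras_active bool so callers can tell "registration in flight" apart from plain "off". A sketch of how a reconfiguration path is expected to sample it, mirroring the ras_fwlog_buffsize store handler later in this diff:

static int demo_ras_quiesce(struct lpfc_hba *phba)      /* illustrative name */
{
        enum ras_state state;

        spin_lock_irq(&phba->hbalock);
        state = phba->ras_fwlog.state;          /* sample under the hba lock */
        spin_unlock_irq(&phba->hbalock);

        if (state == REG_INPROGRESS)
                return -EBUSY;                  /* registration mailbox outstanding */
        if (state == ACTIVE)
                lpfc_ras_stop_fwlog(phba);      /* quiesce before reconfiguring */
        return 0;                               /* INACTIVE: nothing to tear down */
}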
struct dentry *debug_InjErrWWPN; /* WWPN to inject errors at */ @@ -1051,6 +1061,7 @@ struct lpfc_hba { #ifdef LPFC_HDWQ_LOCK_STAT struct dentry *debug_lockstat; #endif + struct dentry *debug_ras_log; atomic_t nvmeio_trc_cnt; uint32_t nvmeio_trc_size; uint32_t nvmeio_trc_output_idx; @@ -1205,6 +1216,15 @@ struct lpfc_hba { uint64_t ktime_seg10_min; uint64_t ktime_seg10_max; #endif + + struct hlist_node cpuhp; /* used for cpuhp per hba callback */ + struct timer_list cpuhp_poll_timer; + struct list_head poll_list; /* slowpath eq polling list */ +#define LPFC_POLL_HB 1 /* slowpath heartbeat */ +#define LPFC_POLL_FASTPATH 0 /* called from fastpath */ +#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */ + + char os_host_name[MAXHOSTNAMELEN]; }; static inline struct Scsi_Host * @@ -1295,6 +1315,26 @@ lpfc_phba_elsring(struct lpfc_hba *phba) } /** + * lpfc_next_online_numa_cpu - Finds next online CPU on NUMA node + * @numa_mask: Pointer to phba's numa_mask member. + * @start: starting cpu index + * + * Note: If no valid cpu found, then nr_cpu_ids is returned. + * + **/ +static inline unsigned int +lpfc_next_online_numa_cpu(const struct cpumask *numa_mask, unsigned int start) +{ + unsigned int cpu_it; + + for_each_cpu_wrap(cpu_it, numa_mask, start) { + if (cpu_online(cpu_it)) + break; + } + + return cpu_it; +} +/** * lpfc_sli4_mod_hba_eq_delay - update EQ delay * @phba: Pointer to HBA context object. * @q: The Event Queue to update. diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index ea62322ffe2b..46f56f30f77e 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c @@ -176,7 +176,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, int i; int len = 0; char tmp[LPFC_MAX_NVME_INFO_TMP_LEN] = {0}; - unsigned long iflags = 0; if (!(vport->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) { len = scnprintf(buf, PAGE_SIZE, "NVME Disabled\n"); @@ -347,7 +346,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE) goto buffer_done; - rcu_read_lock(); scnprintf(tmp, sizeof(tmp), "XRI Dist lpfc%d Total %d IO %d ELS %d\n", phba->brd_no, @@ -355,7 +353,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, phba->sli4_hba.io_xri_max, lpfc_sli4_get_els_iocb_cnt(phba)); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; /* Port state is only one of two values for now. */ if (localport->port_id) @@ -371,15 +369,17 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, wwn_to_u64(vport->fc_nodename.u.wwn), localport->port_id, statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto buffer_done; + + spin_lock_irq(shost->host_lock); list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { nrport = NULL; - spin_lock_irqsave(&vport->phba->hbalock, iflags); + spin_lock(&vport->phba->hbalock); rport = lpfc_ndlp_get_nrport(ndlp); if (rport) nrport = rport->remoteport; - spin_unlock_irqrestore(&vport->phba->hbalock, iflags); + spin_unlock(&vport->phba->hbalock); if (!nrport) continue; @@ -398,39 +398,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, /* Tab in to show lport ownership. 
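lpfc_next_online_numa_cpu() leans on the for_each_cpu_wrap() contract: if the wrap completes without finding an online CPU, the iterator variable ends at nr_cpu_ids, which the kerneldoc above calls out as the "none found" sentinel. Callers therefore need a fallback; one hypothetical shape:

/* Hypothetical caller honouring the nr_cpu_ids sentinel. */
static unsigned int demo_pick_cpu(const struct cpumask *numa_mask,
                                  unsigned int start)
{
        unsigned int cpu = lpfc_next_online_numa_cpu(numa_mask, start);

        if (cpu >= nr_cpu_ids)                          /* no online CPU on node */
                cpu = cpumask_first(cpu_online_mask);   /* fall back to any CPU */
        return cpu;
}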
*/ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; if (phba->brd_no >= 10) { if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "WWPN x%llx ", nrport->port_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "WWNN x%llx ", nrport->node_name); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; scnprintf(tmp, sizeof(tmp), "DID x%06x ", nrport->port_id); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; /* An NVME rport can have multiple roles. */ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) { if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) { if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) { if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR | FC_PORT_ROLE_NVME_TARGET | @@ -438,14 +438,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x", nrport->port_role); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } scnprintf(tmp, sizeof(tmp), "%s\n", statep); if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE) - goto rcu_unlock_buf_done; + goto unlock_buf_done; } - rcu_read_unlock(); + spin_unlock_irq(shost->host_lock); if (!lport) goto buffer_done; @@ -505,11 +505,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr, atomic_read(&lport->cmpl_fcp_err)); strlcat(buf, tmp, PAGE_SIZE); - /* RCU is already unlocked. */ + /* host_lock is already unlocked. 
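The repeated strlcat(...) >= PAGE_SIZE tests in this function work because strlcat() returns the length it tried to build (the initial strlen(dst) plus strlen(src)), not the length it actually stored; a result at or beyond the buffer size therefore flags truncation. A tiny standalone illustration:

#include <linux/string.h>
#include <linux/printk.h>

static void demo_strlcat_truncation(void)
{
        char buf[8] = "nvme";           /* 4 chars used of 8 */
        size_t want = strlcat(buf, " rport", sizeof(buf));

        /* want == 10 (4 + 6), but buf now holds only "nvme rp" */
        if (want >= sizeof(buf))
                pr_debug("output truncated, needed %zu bytes\n", want);
}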
*/ goto buffer_done; - rcu_unlock_buf_done: - rcu_read_unlock(); + unlock_buf_done: + spin_unlock_irq(shost->host_lock); buffer_done: len = strnlen(buf, PAGE_SIZE); @@ -841,7 +841,8 @@ lpfc_hdw_show(struct device *dev, struct device_attribute *attr, char *buf) lpfc_vpd_t *vp = &phba->vpd; lpfc_jedec_to_ascii(vp->rev.biuRev, hdw); - return scnprintf(buf, PAGE_SIZE, "%s\n", hdw); + return scnprintf(buf, PAGE_SIZE, "%s %08x %08x\n", hdw, + vp->rev.smRev, vp->rev.smFwRev); } /** @@ -1474,8 +1475,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) int i; msleep(100); - lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, - &portstat_reg.word0); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0)) + return -EIO; /* verify if privileged for the request operation */ if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) && @@ -1485,8 +1487,9 @@ lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba) /* wait for the SLI port firmware ready after firmware reset */ for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) { msleep(10); - lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, - &portstat_reg.word0); + if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, + &portstat_reg.word0)) + continue; if (!bf_get(lpfc_sliport_status_err, &portstat_reg)) continue; if (!bf_get(lpfc_sliport_status_rn, &portstat_reg)) @@ -1641,7 +1644,7 @@ lpfc_set_trunking(struct lpfc_hba *phba, char *buff_out) { LPFC_MBOXQ_t *mbox = NULL; unsigned long val = 0; - char *pval = 0; + char *pval = NULL; int rc = 0; if (!strncmp("enable", buff_out, @@ -3532,6 +3535,31 @@ LPFC_ATTR_R(enable_rrq, 2, 0, 2, LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK, LPFC_DELAY_INIT_LINK_INDEFINITELY, "Suppress Link Up at initialization"); + +static ssize_t +lpfc_pls_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", + phba->sli4_hba.pc_sli4_params.pls); +} +static DEVICE_ATTR(pls, 0444, + lpfc_pls_show, NULL); + +static ssize_t +lpfc_pt_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba; + + return scnprintf(buf, PAGE_SIZE, "%d\n", + (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0); +} +static DEVICE_ATTR(pt, 0444, + lpfc_pt_show, NULL); + /* # lpfc_cnt: Number of IOCBs allocated for ELS, CT, and ABTS # 1 - (1024) @@ -3579,9 +3607,6 @@ lpfc_txcmplq_hw_show(struct device *dev, struct device_attribute *attr, static DEVICE_ATTR(txcmplq_hw, S_IRUGO, lpfc_txcmplq_hw_show, NULL); -LPFC_ATTR_R(iocb_cnt, 2, 1, 5, - "Number of IOCBs alloc for ELS, CT, and ABTS: 1k to 5k IOCBs"); - /* # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear # until the timer expires. Value range is [0,255]. Default value is 30. 
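The new pls and pt attributes use the stock read-only sysfs shape: mode 0444, a show routine only, and scnprintf(), which never writes past the size it is given and returns the bytes actually stored, so its result is always a valid sysfs return value. The same skeleton with a hypothetical attribute name:

static ssize_t demo_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
{
        struct Scsi_Host *shost = class_to_shost(dev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

        /* scnprintf() caps output at PAGE_SIZE and reports bytes written */
        return scnprintf(buf, PAGE_SIZE, "%d\n",
                         (phba->hba_flag & HBA_PERSISTENT_TOPO) ? 1 : 0);
}
static DEVICE_ATTR(demo, 0444, demo_show, NULL);        /* read-only */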
@@ -3682,8 +3707,8 @@ lpfc_update_rport_devloss_tmo(struct lpfc_vport *vport) if (rport) remoteport = rport->remoteport; spin_unlock(&vport->phba->hbalock); - if (remoteport) - nvme_fc_set_remoteport_devloss(rport->remoteport, + if (rport && remoteport) + nvme_fc_set_remoteport_devloss(remoteport, vport->cfg_devloss_tmo); #endif } @@ -4095,8 +4120,16 @@ lpfc_topology_store(struct device *dev, struct device_attribute *attr, val); return -EINVAL; } - if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || - phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + /* + * The 'topology' is not a configurable parameter if : + * - persistent topology enabled + * - G7/G6 with no private loop support + */ + + if ((phba->hba_flag & HBA_PERSISTENT_TOPO || + (!phba->sli4_hba.pc_sli4_params.pls && + (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || + phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC))) && val == 4) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "3114 Loop mode not supported\n"); @@ -5297,7 +5330,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, len += scnprintf(buf + len, PAGE_SIZE - len, "CPU %02d not present\n", phba->sli4_hba.curr_disp_cpu); - else if (cpup->irq == LPFC_VECTOR_MAP_EMPTY) { + else if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) { if (cpup->hdwq == LPFC_VECTOR_MAP_EMPTY) len += scnprintf( buf + len, PAGE_SIZE - len, @@ -5310,10 +5343,10 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, else len += scnprintf( buf + len, PAGE_SIZE - len, - "CPU %02d EQ %04d hdwq %04d " + "CPU %02d EQ None hdwq %04d " "physid %d coreid %d ht %d ua %d\n", phba->sli4_hba.curr_disp_cpu, - cpup->eq, cpup->hdwq, cpup->phys_id, + cpup->hdwq, cpup->phys_id, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN)); @@ -5328,7 +5361,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN), - cpup->irq); + lpfc_get_irq(cpup->eq)); else len += scnprintf( buf + len, PAGE_SIZE - len, @@ -5339,7 +5372,7 @@ lpfc_fcp_cpu_map_show(struct device *dev, struct device_attribute *attr, cpup->core_id, (cpup->flag & LPFC_CPU_MAP_HYPER), (cpup->flag & LPFC_CPU_MAP_UNASSIGN), - cpup->irq); + lpfc_get_irq(cpup->eq)); } phba->sli4_hba.curr_disp_cpu++; @@ -5467,15 +5500,12 @@ LPFC_ATTR_RW(nvmet_fb_size, 0, 0, 65536, * lpfc_nvme_enable_fb: Enable NVME first burst on I and T functions. * For the Initiator (I), enabling this parameter means that an NVMET * PRLI response with FBA enabled and an FB_SIZE set to a nonzero value will be - * processed by the initiator for subsequent NVME FCP IO. For the target - * function (T), enabling this parameter qualifies the lpfc_nvmet_fb_size - * driver parameter as the target function's first burst size returned to the - * initiator in the target's NVME PRLI response. Parameter supported on physical - * port only - no NPIV support. + * processed by the initiator for subsequent NVME FCP IO. + * Currently, this feature is not supported on the NVME target * Value range is [0,1]. Default value is 0 (disabled). 
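The reworked guard in lpfc_topology_store() above reads more easily when flattened: loop mode (val == 4, the setting rejected with the "3114 Loop mode not supported" message) is refused either when persistent topology is enabled or on G6/G7 parts without private-loop support. An equivalent, purely illustrative restatement:

bool persistent = phba->hba_flag & HBA_PERSISTENT_TOPO;
bool g6_g7_without_pls =
        !phba->sli4_hba.pc_sli4_params.pls &&
        (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC ||
         phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC);

if ((persistent || g6_g7_without_pls) && val == 4)
        return -EINVAL;                 /* FC-AL (loop) topology refused */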
*/ LPFC_ATTR_RW(nvme_enable_fb, 0, 0, 1, - "Enable First Burst feature on I and T functions."); + "Enable First Burst feature for NVME Initiator."); /* # lpfc_max_scsicmpl_time: Use scsi command completion time to control I/O queue @@ -5709,6 +5739,19 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, "Embed NVME Command in WQE"); /* + * lpfc_fcp_mq_threshold: Set the maximum number of Hardware Queues + * the driver will advertise it supports to the SCSI layer. + * + * 0 = Set nr_hw_queues by the number of CPUs or HW queues. + * 1,256 = Manually specify nr_hw_queue value to be advertised, + * + * Value range is [0,256]. Default value is 8. + */ +LPFC_ATTR_R(fcp_mq_threshold, LPFC_FCP_MQ_THRESHOLD_DEF, + LPFC_FCP_MQ_THRESHOLD_MIN, LPFC_FCP_MQ_THRESHOLD_MAX, + "Set the number of SCSI Queues advertised"); + +/* * lpfc_hdw_queue: Set the number of Hardware Queues the driver * will advertise it supports to the NVME and SCSI layers. This also * will map to the number of CQ/WQ pairs the driver will create. @@ -5718,30 +5761,130 @@ LPFC_ATTR_RW(nvme_embed_cmd, 1, 0, 2, * A hardware IO queue maps (qidx) to a specific driver CQ/WQ. * * 0 = Configure the number of hdw queues to the number of active CPUs. - * 1,128 = Manually specify how many hdw queues to use. + * 1,256 = Manually specify how many hdw queues to use. * - * Value range is [0,128]. Default value is 0. + * Value range is [0,256]. Default value is 0. */ LPFC_ATTR_R(hdw_queue, LPFC_HBA_HDWQ_DEF, LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, "Set the number of I/O Hardware Queues"); +static inline void +lpfc_assign_default_irq_numa(struct lpfc_hba *phba) +{ +#if IS_ENABLED(CONFIG_X86) + /* If AMD architecture, then default is LPFC_IRQ_CHANN_NUMA */ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) + phba->cfg_irq_numa = 1; + else + phba->cfg_irq_numa = 0; +#else + phba->cfg_irq_numa = 0; +#endif +} + /* * lpfc_irq_chann: Set the number of IRQ vectors that are available * for Hardware Queues to utilize. This also will map to the number * of EQ / MSI-X vectors the driver will create. This should never be * more than the number of Hardware Queues * - * 0 = Configure number of IRQ Channels to the number of active CPUs. - * 1,128 = Manually specify how many IRQ Channels to use. + * 0 = Configure number of IRQ Channels to: + * if AMD architecture, number of CPUs on HBA's NUMA node + * otherwise, number of active CPUs. + * [1,256] = Manually specify how many IRQ Channels to use. * - * Value range is [0,128]. Default value is 0. + * Value range is [0,256]. Default value is [0]. */ -LPFC_ATTR_R(irq_chann, - LPFC_HBA_HDWQ_DEF, - LPFC_HBA_HDWQ_MIN, LPFC_HBA_HDWQ_MAX, - "Set the number of I/O IRQ Channels"); +static uint lpfc_irq_chann = LPFC_IRQ_CHANN_DEF; +module_param(lpfc_irq_chann, uint, 0444); +MODULE_PARM_DESC(lpfc_irq_chann, "Set number of interrupt vectors to allocate"); + +/* lpfc_irq_chann_init - Set the hba irq_chann initial value + * @phba: lpfc_hba pointer. + * @val: contains the initial value + * + * Description: + * Validates the initial value is within range and assigns it to the + * adapter. If not in range, an error message is posted and the + * default value is assigned. 
+ * + * Returns: + * zero if value is in range and is set + * -EINVAL if value was out of range + **/ +static int +lpfc_irq_chann_init(struct lpfc_hba *phba, uint32_t val) +{ + const struct cpumask *numa_mask; + + if (phba->cfg_use_msi != 2) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8532 use_msi = %u ignoring cfg_irq_numa\n", + phba->cfg_use_msi); + phba->cfg_irq_numa = 0; + phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN; + return 0; + } + + /* Check if default setting was passed */ + if (val == LPFC_IRQ_CHANN_DEF) + lpfc_assign_default_irq_numa(phba); + + if (phba->cfg_irq_numa) { + numa_mask = &phba->sli4_hba.numa_mask; + + if (cpumask_empty(numa_mask)) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8533 Could not identify NUMA node, " + "ignoring cfg_irq_numa\n"); + phba->cfg_irq_numa = 0; + phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN; + } else { + phba->cfg_irq_chann = cpumask_weight(numa_mask); + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8543 lpfc_irq_chann set to %u " + "(numa)\n", phba->cfg_irq_chann); + } + } else { + if (val > LPFC_IRQ_CHANN_MAX) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "8545 lpfc_irq_chann attribute cannot " + "be set to %u, allowed range is " + "[%u,%u]\n", + val, + LPFC_IRQ_CHANN_MIN, + LPFC_IRQ_CHANN_MAX); + phba->cfg_irq_chann = LPFC_IRQ_CHANN_MIN; + return -EINVAL; + } + phba->cfg_irq_chann = val; + } + + return 0; +} + +/** + * lpfc_irq_chann_show - Display value of irq_chann + * @dev: class converted to a Scsi_host structure. + * @attr: device attribute, not used. + * @buf: on return contains a string with the list sizes + * + * Returns: size of formatted string. + **/ +static ssize_t +lpfc_irq_chann_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata; + struct lpfc_hba *phba = vport->phba; + + return scnprintf(buf, PAGE_SIZE, "%u\n", phba->cfg_irq_chann); +} + +static DEVICE_ATTR_RO(lpfc_irq_chann); /* # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware. @@ -5914,7 +6057,7 @@ lpfc_sg_seg_cnt_init(struct lpfc_hba *phba, int val) * 1 = MDS Diagnostics enabled * Value range is [0,1]. Default value is 0. */ -LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); +LPFC_ATTR_RW(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); /* * lpfc_ras_fwlog_buffsize: Firmware logging host buffer size @@ -5922,7 +6065,53 @@ LPFC_ATTR_R(enable_mds_diags, 0, 0, 1, "Enable MDS Diagnostics"); * [1-4] = Multiple of 1/4th Mb of host memory for FW logging * Value range [0..4]. Default value is 0 */ -LPFC_ATTR_RW(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); +LPFC_ATTR(ras_fwlog_buffsize, 0, 0, 4, "Host memory for FW logging"); +lpfc_param_show(ras_fwlog_buffsize); + +static ssize_t +lpfc_ras_fwlog_buffsize_set(struct lpfc_hba *phba, uint val) +{ + int ret = 0; + enum ras_state state; + + if (!lpfc_rangecheck(val, 0, 4)) + return -EINVAL; + + if (phba->cfg_ras_fwlog_buffsize == val) + return 0; + + if (phba->cfg_ras_fwlog_func != PCI_FUNC(phba->pcidev->devfn)) + return -EINVAL; + + spin_lock_irq(&phba->hbalock); + state = phba->ras_fwlog.state; + spin_unlock_irq(&phba->hbalock); + + if (state == REG_INPROGRESS) { + lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "6147 RAS Logging " + "registration is in progress\n"); + return -EBUSY; + } + + /* For disable logging: stop the logs and free the DMA. 
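When cfg_irq_numa is in effect, lpfc_irq_chann_init above sizes the vector count from cpumask_weight() of the adapter's NUMA mask and falls back to the minimum channel count when the mask is empty. The counting step can be modeled in userspace with the glibc CPU_* macros; this sketch assumes Linux and uses the calling process's affinity mask as a stand-in for the HBA's NUMA-node mask:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	cpu_set_t mask;
	int weight;

	/* The driver starts from the adapter's NUMA mask; here we just
	 * ask the kernel which CPUs this process may run on. */
	if (sched_getaffinity(0, sizeof(mask), &mask) != 0) {
		perror("sched_getaffinity");
		return 1;
	}

	weight = CPU_COUNT(&mask);	/* analogue of cpumask_weight() */
	if (weight == 0)
		printf("empty mask: fall back to the minimum IRQ channel count\n");
	else
		printf("size IRQ channels to %d (one per CPU in the mask)\n",
		       weight);
	return 0;
}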
+ * For ras_fwlog_buffsize size change we still need to free and + * reallocate the DMA in lpfc_sli4_ras_fwlog_init. + */ + phba->cfg_ras_fwlog_buffsize = val; + if (state == ACTIVE) { + lpfc_ras_stop_fwlog(phba); + lpfc_sli4_ras_dma_free(phba); + } + + lpfc_sli4_ras_init(phba); + if (phba->ras_fwlog.ras_enabled) + ret = lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level, + LPFC_RAS_ENABLE_LOGGING); + return ret; +} + +lpfc_param_store(ras_fwlog_buffsize); +static DEVICE_ATTR_RW(lpfc_ras_fwlog_buffsize); /* * lpfc_ras_fwlog_level: Firmware logging verbosity level @@ -6030,6 +6219,7 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_cq_poll_threshold, &dev_attr_lpfc_cq_max_proc_limit, &dev_attr_lpfc_fcp_cpu_map, + &dev_attr_lpfc_fcp_mq_threshold, &dev_attr_lpfc_hdw_queue, &dev_attr_lpfc_irq_chann, &dev_attr_lpfc_suppress_rsp, @@ -6059,8 +6249,9 @@ struct device_attribute *lpfc_hba_attrs[] = { &dev_attr_lpfc_sriov_nr_virtfn, &dev_attr_lpfc_req_fw_upgrade, &dev_attr_lpfc_suppress_link_up, - &dev_attr_lpfc_iocb_cnt, &dev_attr_iocb_hw, + &dev_attr_pls, + &dev_attr_pt, &dev_attr_txq_hw, &dev_attr_txcmplq_hw, &dev_attr_lpfc_fips_level, @@ -6845,10 +7036,31 @@ lpfc_get_starget_port_name(struct scsi_target *starget) static void lpfc_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout) { + struct lpfc_rport_data *rdata = rport->dd_data; + struct lpfc_nodelist *ndlp = rdata->pnode; +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct lpfc_nvme_rport *nrport = NULL; +#endif + if (timeout) rport->dev_loss_tmo = timeout; else rport->dev_loss_tmo = 1; + + if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { + dev_info(&rport->dev, "Cannot find remote node to " + "set rport dev loss tmo, port_id x%x\n", + rport->port_id); + return; + } + +#if (IS_ENABLED(CONFIG_NVME_FC)) + nrport = lpfc_ndlp_get_nrport(ndlp); + + if (nrport && nrport->remoteport) + nvme_fc_set_remoteport_devloss(nrport->remoteport, + rport->dev_loss_tmo); +#endif } /** @@ -7045,12 +7257,39 @@ struct fc_function_template lpfc_vport_transport_functions = { }; /** + * lpfc_get_hba_function_mode - Used to determine the HBA function in FCoE + * Mode + * @phba: lpfc_hba pointer. + **/ +static void +lpfc_get_hba_function_mode(struct lpfc_hba *phba) +{ + /* If the adapter supports FCoE mode */ + switch (phba->pcidev->device) { + case PCI_DEVICE_ID_SKYHAWK: + case PCI_DEVICE_ID_SKYHAWK_VF: + case PCI_DEVICE_ID_LANCER_FCOE: + case PCI_DEVICE_ID_LANCER_FCOE_VF: + case PCI_DEVICE_ID_ZEPHYR_DCSP: + case PCI_DEVICE_ID_HORNET: + case PCI_DEVICE_ID_TIGERSHARK: + case PCI_DEVICE_ID_TOMCAT: + phba->hba_flag |= HBA_FCOE_MODE; + break; + default: + /* for others, clear the flag */ + phba->hba_flag &= ~HBA_FCOE_MODE; + } +} + +/** * lpfc_get_cfgparam - Used during probe_one to init the adapter structure * @phba: lpfc_hba pointer. **/ void lpfc_get_cfgparam(struct lpfc_hba *phba) { + lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched); lpfc_ns_query_init(phba, lpfc_ns_query); lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset); @@ -7100,8 +7339,18 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) else phba->cfg_poll = lpfc_poll; - if (phba->cfg_enable_bg) + /* Get the function mode */ + lpfc_get_hba_function_mode(phba); + + /* BlockGuard allowed for FC only. 
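The ras_fwlog_buffsize store handler above resizes the firmware-log ring by quiescing first: it refuses while registration is in progress, stops logging and frees the old DMA buffers if logging was active, then re-initializes at the new size. A single-threaded userspace model of that ordering (the state names mirror the driver's enum, but the buffer sizes are invented and the driver samples the state under hbalock):

#include <stdio.h>
#include <stdlib.h>

enum ras_state { INACTIVE, REG_INPROGRESS, ACTIVE };

static enum ras_state state = ACTIVE;
static char *fwlog_buf;
static size_t fwlog_sz;

static int fwlog_resize(size_t new_sz)
{
	if (state == REG_INPROGRESS)	/* mirrors the -EBUSY bail-out */
		return -1;

	if (state == ACTIVE) {		/* stop logging before freeing */
		state = INACTIVE;
		free(fwlog_buf);
		fwlog_buf = NULL;
	}

	fwlog_buf = malloc(new_sz);	/* re-init at the new size */
	if (!fwlog_buf)
		return -1;
	fwlog_sz = new_sz;
	state = ACTIVE;
	return 0;
}

int main(void)
{
	fwlog_sz = 256 * 1024;
	fwlog_buf = malloc(fwlog_sz);
	if (!fwlog_buf)
		return 1;
	if (fwlog_resize(512 * 1024) == 0)
		printf("fwlog resized to %zu bytes\n", fwlog_sz);
	free(fwlog_buf);
	return 0;
}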
*/ + if (phba->cfg_enable_bg && phba->hba_flag & HBA_FCOE_MODE) { + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "0581 BlockGuard feature not supported\n"); + /* If set, clear the BlockGuard support param */ + phba->cfg_enable_bg = 0; + } else if (phba->cfg_enable_bg) { phba->sli3_options |= LPFC_SLI3_BG_ENABLED; + } lpfc_suppress_rsp_init(phba, lpfc_suppress_rsp); @@ -7112,6 +7361,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) /* Initialize first burst. Target vs Initiator are different. */ lpfc_nvme_enable_fb_init(phba, lpfc_nvme_enable_fb); lpfc_nvmet_fb_size_init(phba, lpfc_nvmet_fb_size); + lpfc_fcp_mq_threshold_init(phba, lpfc_fcp_mq_threshold); lpfc_hdw_queue_init(phba, lpfc_hdw_queue); lpfc_irq_chann_init(phba, lpfc_irq_chann); lpfc_enable_bbcr_init(phba, lpfc_enable_bbcr); @@ -7146,12 +7396,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) phba->cfg_soft_wwpn = 0L; lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt); lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth); - lpfc_hba_log_verbose_init(phba, lpfc_log_verbose); lpfc_aer_support_init(phba, lpfc_aer_support); lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn); lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade); lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up); - lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt); lpfc_delay_discovery_init(phba, lpfc_delay_discovery); lpfc_sli_mode_init(phba, lpfc_sli_mode); phba->cfg_enable_dss = 1; @@ -7160,16 +7408,6 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) lpfc_ras_fwlog_level_init(phba, lpfc_ras_fwlog_level); lpfc_ras_fwlog_func_init(phba, lpfc_ras_fwlog_func); - - /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to - * accommodate 512K and 1M IOs in a single nvme buf and supply - * enough NVME LS iocb buffers for larger connectivity counts. 
- */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; - phba->cfg_iocb_cnt = 5; - } - return; } @@ -7207,11 +7445,11 @@ lpfc_nvme_mod_param_dep(struct lpfc_hba *phba) } if (!phba->cfg_nvmet_mrq) - phba->cfg_nvmet_mrq = phba->cfg_irq_chann; + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; /* Adjust lpfc_nvmet_mrq to avoid running out of WQE slots */ - if (phba->cfg_nvmet_mrq > phba->cfg_irq_chann) { - phba->cfg_nvmet_mrq = phba->cfg_irq_chann; + if (phba->cfg_nvmet_mrq > phba->cfg_hdw_queue) { + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC, "6018 Adjust lpfc_nvmet_mrq to %d\n", phba->cfg_nvmet_mrq); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index b7216d694bff..0ea03ae93d91 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -1040,7 +1040,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, if (!dmabuf) { lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC, "2616 No dmabuf " - "found for iocbq 0x%p\n", + "found for iocbq x%px\n", iocbq); kfree(evt_dat->data); kfree(evt_dat); @@ -1276,9 +1276,7 @@ lpfc_bsg_hba_set_event(struct bsg_job *job) return 0; /* call job done later */ job_error: - if (dd_data != NULL) - kfree(dd_data); - + kfree(dd_data); job->dd_data = NULL; return rc; } @@ -1571,7 +1569,6 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct bsg_job *job, uint32_t tag, "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n", icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state); - ctiocb->iocb_cmpl = NULL; ctiocb->iocb_flag |= LPFC_IO_LIBDFC; ctiocb->vport = phba->pport; ctiocb->context1 = dd_data; @@ -4492,12 +4489,6 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, phba->mbox_ext_buf_ctx.seqNum++; nemb_tp = phba->mbox_ext_buf_ctx.nembType; - dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); - if (!dd_data) { - rc = -ENOMEM; - goto job_error; - } - pbuf = (uint8_t *)dmabuf->virt; size = job->request_payload.payload_len; sg_copy_to_buffer(job->request_payload.sg_list, @@ -4534,6 +4525,13 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, "2968 SLI_CONFIG ext-buffer wr all %d " "ebuffers received\n", phba->mbox_ext_buf_ctx.numBuf); + + dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL); + if (!dd_data) { + rc = -ENOMEM; + goto job_error; + } + /* mailbox command structure for base driver */ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!pmboxq) { @@ -4582,6 +4580,8 @@ lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct bsg_job *job, return SLI_CONFIG_HANDLED; job_error: + if (pmboxq) + mempool_free(pmboxq, phba->mbox_mem_pool); lpfc_bsg_dma_page_free(phba, dmabuf); kfree(dd_data); @@ -5438,10 +5438,12 @@ lpfc_bsg_get_ras_config(struct bsg_job *job) bsg_reply->reply_data.vendor_reply.vendor_rsp; /* Current logging state */ - if (ras_fwlog->ras_active == true) + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state == ACTIVE) ras_reply->state = LPFC_RASLOG_STATE_RUNNING; else ras_reply->state = LPFC_RASLOG_STATE_STOPPED; + spin_unlock_irq(&phba->hbalock); ras_reply->log_level = phba->ras_fwlog.fw_loglevel; ras_reply->log_buff_sz = phba->cfg_ras_fwlog_buffsize; @@ -5451,7 +5453,9 @@ ras_job_error: bsg_reply->result = rc; /* complete the job back to userspace */ - bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ 
-5496,10 +5500,13 @@ lpfc_bsg_set_ras_config(struct bsg_job *job) if (action == LPFC_RASACTION_STOP_LOGGING) { /* Check if already disabled */ - if (ras_fwlog->ras_active == false) { + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); rc = -ESRCH; goto ras_job_error; } + spin_unlock_irq(&phba->hbalock); /* Disable logging */ lpfc_ras_stop_fwlog(phba); @@ -5510,8 +5517,10 @@ lpfc_bsg_set_ras_config(struct bsg_job *job) * FW-logging with new log-level. Return status * "Logging already Running" to caller. **/ - if (ras_fwlog->ras_active) + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state != INACTIVE) action_status = -EINPROGRESS; + spin_unlock_irq(&phba->hbalock); /* Enable logging */ rc = lpfc_sli4_ras_fwlog_init(phba, log_level, @@ -5530,8 +5539,9 @@ ras_job_error: bsg_reply->result = rc; /* complete the job back to userspace */ - bsg_job_done(job, bsg_reply->result, - bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -5591,7 +5601,9 @@ ras_job_error: bsg_reply->result = rc; /* complete the job back to userspace */ - bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -5624,10 +5636,13 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job) goto ras_job_error; /* Logging to be stopped before reading */ - if (ras_fwlog->ras_active == true) { + spin_lock_irq(&phba->hbalock); + if (ras_fwlog->state == ACTIVE) { + spin_unlock_irq(&phba->hbalock); rc = -EINPROGRESS; goto ras_job_error; } + spin_unlock_irq(&phba->hbalock); if (job->request_len < sizeof(struct fc_bsg_request) + @@ -5673,7 +5688,9 @@ lpfc_bsg_get_ras_fwlog(struct bsg_job *job) ras_job_error: bsg_reply->result = rc; - bsg_job_done(job, bsg_reply->result, bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } @@ -5744,8 +5761,9 @@ lpfc_get_trunk_info(struct bsg_job *job) phba->sli4_hba.link_state.logical_speed / 1000; job_error: bsg_reply->result = rc; - bsg_job_done(job, bsg_reply->result, - bsg_reply->reply_payload_rcv_len); + if (!rc) + bsg_job_done(job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); return rc; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 68e9f96242d3..25d3dd39bc05 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -180,7 +180,7 @@ int lpfc_issue_gidft(struct lpfc_vport *vport); int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); -void lpfc_fdmi_num_disc_check(struct lpfc_vport *); +void lpfc_fdmi_change_check(struct lpfc_vport *vport); void lpfc_delayed_disc_tmo(struct timer_list *); void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); @@ -215,6 +215,12 @@ irqreturn_t lpfc_sli_fp_intr_handler(int, void *); irqreturn_t lpfc_sli4_intr_handler(int, void *); irqreturn_t lpfc_sli4_hba_intr_handler(int, void *); +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba); +int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path); +void lpfc_sli4_poll_hbtimer(struct timer_list *t); +void lpfc_sli4_start_polling(struct lpfc_queue *q); +void lpfc_sli4_stop_polling(struct lpfc_queue *q); + void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *); 
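The lpfc_bsg error paths in the hunks above lean on two C idioms: kfree(NULL) is a no-op, so the old "if (dd_data != NULL)" guard added nothing, and allocations were moved later so the shared job_error label frees only what was actually allocated (now including pmboxq). A userspace sketch of that goto-unwind shape, with all names invented:

#include <stdlib.h>

struct dd_data { int tag; };
struct mbox { char payload[64]; };

static int submit_job(int fail_step)
{
	struct dd_data *dd_data = NULL;	/* stays NULL until live */
	struct mbox *pmboxq = NULL;
	int rc = 0;

	dd_data = calloc(1, sizeof(*dd_data));
	if (!dd_data || fail_step == 1) {
		rc = -1;
		goto job_error;
	}

	pmboxq = calloc(1, sizeof(*pmboxq));
	if (!pmboxq || fail_step == 2) {
		rc = -1;
		goto job_error;
	}

	/* Success: ownership would pass to the completion handler here. */
	return 0;

job_error:
	free(pmboxq);	/* may be NULL: free(NULL) is a no-op */
	free(dd_data);	/* likewise, no guard needed */
	return rc;
}

int main(void)
{
	return submit_job(2) == -1 ? 0 : 1;
}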
void lpfc_sli4_swap_str(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *); @@ -326,7 +332,7 @@ void lpfc_sli_bemem_bcopy(void *, void *, uint32_t); void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *); void lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba); void lpfc_sli_hba_iocb_abort(struct lpfc_hba *); -void lpfc_sli_flush_fcp_rings(struct lpfc_hba *); +void lpfc_sli_flush_io_rings(struct lpfc_hba *phba); int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_dmabuf *); struct lpfc_dmabuf *lpfc_sli_ringpostbuf_get(struct lpfc_hba *, @@ -433,16 +439,6 @@ int lpfc_sli4_get_allocated_extnts(struct lpfc_hba *, uint16_t, int lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *, uint16_t, uint16_t *, uint16_t *); -/* externs BlockGuard */ -extern char *_dump_buf_data; -extern unsigned long _dump_buf_data_order; -extern char *_dump_buf_dif; -extern unsigned long _dump_buf_dif_order; -extern spinlock_t _dump_buf_lock; -extern int _dump_buf_done; -extern spinlock_t pgcnt_lock; -extern unsigned int pgcnt; - /* Interface exported by fabric iocb scheduler */ void lpfc_fabric_abort_nport(struct lpfc_nodelist *); void lpfc_fabric_abort_hba(struct lpfc_hba *); @@ -595,6 +591,8 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd, struct lpfc_sli4_hdw_queue *qp); void lpfc_nvme_cmd_template(void); void lpfc_nvmet_cmd_template(void); +void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn); +void lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt); extern int lpfc_enable_nvmet_cnt; extern unsigned long long lpfc_enable_nvmet[]; extern int lpfc_no_hba_reset_cnt; diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index ec72c39997d2..58b35a1442c1 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -462,6 +462,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) struct lpfc_nodelist *ndlp; if ((vport->port_type != LPFC_NPIV_PORT) || + (fc4_type == FC_TYPE_FCP) || !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { ndlp = lpfc_setup_disc_node(vport, Did); @@ -480,10 +481,20 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0238 Process x%06x NameServer Rsp " - "Data: x%x x%x x%x x%x\n", Did, + "Data: x%x x%x x%x x%x x%x\n", Did, ndlp->nlp_flag, ndlp->nlp_fc4_type, - vport->fc_flag, + ndlp->nlp_state, vport->fc_flag, vport->fc_rscn_id_cnt); + + /* if ndlp needs to be discovered and prior + * state of ndlp hit devloss, change state to + * allow rediscovery. 
+ */ + if (ndlp->nlp_flag & NLP_NPR_2B_DISC && + ndlp->nlp_state == NLP_STE_UNUSED_NODE) { + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_NPR_NODE); + } } else { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Skip1 GID_FTrsp: did:x%x flg:x%x cnt:%d", @@ -491,9 +502,9 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0239 Skip x%06x NameServer Rsp " - "Data: x%x x%x\n", Did, - vport->fc_flag, - vport->fc_rscn_id_cnt); + "Data: x%x x%x %p\n", + Did, vport->fc_flag, + vport->fc_rscn_id_cnt, ndlp); } } else { if (!(vport->fc_flag & FC_RSCN_MODE) || @@ -751,9 +762,13 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (CTrsp->CommandResponse.bits.CmdRsp == cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0208 NameServer Rsp Data: x%x x%x\n", + "0208 NameServer Rsp Data: x%x x%x " + "x%x x%x sz x%x\n", vport->fc_flag, - CTreq->un.gid.Fc4Type); + CTreq->un.gid.Fc4Type, + vport->num_disc_nodes, + vport->gidft_inp, + irsp->un.genreq64.bdl.bdeSize); lpfc_ns_rsp(vport, outp, @@ -814,6 +829,11 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } vport->gidft_inp--; } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "4216 GID_FT cmpl inp %d disc %d\n", + vport->gidft_inp, vport->num_disc_nodes); + /* Link up / RSCN discovery */ if ((vport->num_disc_nodes == 0) && (vport->gidft_inp == 0)) { @@ -943,9 +963,13 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (CTrsp->CommandResponse.bits.CmdRsp == cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "4105 NameServer Rsp Data: x%x x%x\n", + "4105 NameServer Rsp Data: x%x x%x " + "x%x x%x sz x%x\n", vport->fc_flag, - CTreq->un.gid.Fc4Type); + CTreq->un.gid.Fc4Type, + vport->num_disc_nodes, + vport->gidft_inp, + irsp->un.genreq64.bdl.bdeSize); lpfc_ns_rsp(vport, outp, @@ -1007,6 +1031,11 @@ lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, } vport->gidft_inp--; } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6450 GID_PT cmpl inp %d disc %d\n", + vport->gidft_inp, vport->num_disc_nodes); + /* Link up / RSCN discovery */ if ((vport->num_disc_nodes == 0) && (vport->gidft_inp == 0)) { @@ -1141,6 +1170,11 @@ out: /* Link up / RSCN discovery */ if (vport->num_disc_nodes) vport->num_disc_nodes--; + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6451 GFF_ID cmpl inp %d disc %d\n", + vport->gidft_inp, vport->num_disc_nodes); + if (vport->num_disc_nodes == 0) { /* * The driver has cycled through all Nports in the RSCN payload. 
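The GID_FT response change above re-opens discovery for a node whose previous state hit devloss: a node flagged NLP_NPR_2B_DISC but parked in the UNUSED state is moved back to NPR so rediscovery can proceed. The guard reduces to a flag-plus-state test, sketched here with invented names:

#include <stdio.h>

enum nlp_state { STE_UNUSED, STE_NPR, STE_PLOGI_ISSUE };
#define FLAG_NPR_2B_DISC 0x01

struct node {
	unsigned int flags;
	enum nlp_state state;
};

/* Allow rediscovery only for nodes marked for discovery that were
 * parked in UNUSED by a previous devloss. */
static void maybe_reopen(struct node *n)
{
	if ((n->flags & FLAG_NPR_2B_DISC) && n->state == STE_UNUSED) {
		n->state = STE_NPR;
		printf("node moved UNUSED -> NPR for rediscovery\n");
	}
}

int main(void)
{
	struct node n = { .flags = FLAG_NPR_2B_DISC, .state = STE_UNUSED };

	maybe_reopen(&n);
	return n.state == STE_NPR ? 0 : 1;
}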
@@ -1209,14 +1243,34 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ndlp->nlp_fc4_type |= NLP_FC4_NVME; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "3064 Setting ndlp %p, DID x%06x with " - "FC4 x%08x, Data: x%08x x%08x\n", + "3064 Setting ndlp x%px, DID x%06x " + "with FC4 x%08x, Data: x%08x x%08x " + "%d\n", ndlp, did, ndlp->nlp_fc4_type, - FC_TYPE_FCP, FC_TYPE_NVME); - ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; - - lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); - lpfc_issue_els_prli(vport, ndlp, 0); + FC_TYPE_FCP, FC_TYPE_NVME, + ndlp->nlp_state); + + if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_fc4_type) { + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_PRLI_ISSUE); + lpfc_issue_els_prli(vport, ndlp, 0); + } else if (!ndlp->nlp_fc4_type) { + /* If fc4 type is still unknown, then LOGO */ + lpfc_printf_vlog(vport, KERN_INFO, + LOG_DISCOVERY, + "6443 Sending LOGO ndlp x%px," + "DID x%06x with fc4_type: " + "x%08x, state: %d\n", + ndlp, did, ndlp->nlp_fc4_type, + ndlp->nlp_state); + lpfc_issue_els_logo(vport, ndlp, 0); + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, + NLP_STE_NPR_NODE); + } } } else lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY, @@ -1439,33 +1493,35 @@ int lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol, size_t size) { - char fwrev[FW_REV_STR_SIZE]; - int n; + char fwrev[FW_REV_STR_SIZE] = {0}; + char tmp[MAXHOSTNAMELEN] = {0}; - lpfc_decode_firmware_rev(vport->phba, fwrev, 0); + memset(symbol, 0, size); - n = scnprintf(symbol, size, "Emulex %s", vport->phba->ModelName); - if (size < n) - return n; + scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; - n += scnprintf(symbol + n, size - n, " FV%s", fwrev); - if (size < n) - return n; + lpfc_decode_firmware_rev(vport->phba, fwrev, 0); + scnprintf(tmp, sizeof(tmp), " FV%s", fwrev); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; - n += scnprintf(symbol + n, size - n, " DV%s.", - lpfc_release_version); - if (size < n) - return n; + scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; - n += scnprintf(symbol + n, size - n, " HN:%s.", - init_utsname()->nodename); - if (size < n) - return n; + scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name); + if (strlcat(symbol, tmp, size) >= size) + goto buffer_done; /* Note :- OS name is "Linux" */ - n += scnprintf(symbol + n, size - n, " OS:%s", - init_utsname()->sysname); - return n; + scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname); + strlcat(symbol, tmp, size); + +buffer_done: + return strnlen(symbol, size); + } static uint32_t @@ -1830,6 +1886,12 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (irsp->ulpStatus == IOSTAT_LOCAL_REJECT) { switch ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK)) { case IOERR_SLI_ABORTED: + case IOERR_SLI_DOWN: + /* Driver aborted this IO. No retry as error + * is likely Offline->Online or some adapter + * error. Recovery will try again. 
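The lpfc_vport_symbolic_node_name rewrite earlier in this hunk drops the error-prone scnprintf offset arithmetic in favor of strlcat into a zeroed buffer, stopping as soon as a concatenation reports truncation. glibc has no strlcat, so this standalone sketch carries a local implementation with the usual BSD return convention (a result >= size signals truncation); the model name and firmware string are made up:

#include <stdio.h>
#include <string.h>

static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);

	if (dlen == size)		/* dst already fills the buffer */
		return size + slen;
	if (slen < size - dlen) {
		memcpy(dst + dlen, src, slen + 1);	/* fits, incl. NUL */
	} else {
		memcpy(dst + dlen, src, size - dlen - 1);
		dst[size - 1] = '\0';	/* truncated, still terminated */
	}
	return dlen + slen;	/* length it tried to create */
}

int main(void)
{
	char symbol[24];
	char tmp[64];

	memset(symbol, 0, sizeof(symbol));

	snprintf(tmp, sizeof(tmp), "Emulex %s", "LPe32000");
	if (my_strlcat(symbol, tmp, sizeof(symbol)) >= sizeof(symbol))
		goto buffer_done;

	snprintf(tmp, sizeof(tmp), " FV%s", "12.4.243.17");
	if (my_strlcat(symbol, tmp, sizeof(symbol)) >= sizeof(symbol))
		goto buffer_done;	/* taken: 29 chars into 24 bytes */

buffer_done:
	printf("%zu: \"%s\"\n", strnlen(symbol, sizeof(symbol)), symbol);
	return 0;
}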
+			 */
+			break;
 		case IOERR_ABORT_IN_PROGRESS:
 		case IOERR_SEQUENCE_TIMEOUT:
 		case IOERR_ILLEGAL_FRAME:
@@ -1938,14 +2000,16 @@ lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 
 /**
- * lpfc_fdmi_num_disc_check - Check how many mapped NPorts we are connected to
+ * lpfc_fdmi_change_check - Check for changed FDMI parameters
 * @vport: pointer to a host virtual N_Port data structure.
 *
- * Called from hbeat timeout routine to check if the number of discovered
- * ports has changed. If so, re-register that port Attribute.
+ * Check how many mapped NPorts we are connected to
+ * Check if our hostname changed
+ * Called from hbeat timeout routine to check if any FDMI parameters
+ * changed. If so, re-register those Attributes.
 */
 void
-lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
+lpfc_fdmi_change_check(struct lpfc_vport *vport)
 {
 	struct lpfc_hba *phba = vport->phba;
 	struct lpfc_nodelist *ndlp;
@@ -1958,17 +2022,41 @@ lpfc_fdmi_num_disc_check(struct lpfc_vport *vport)
 	if (!(vport->fc_flag & FC_FABRIC))
 		return;
 
+	ndlp = lpfc_findnode_did(vport, FDMI_DID);
+	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
+		return;
+
+	/* Check if system hostname changed */
+	if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
+		memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
+		scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
+			  init_utsname()->nodename);
+		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);
+
+		/* Since this affects multiple HBA and PORT attributes, we need
+		 * to de-register and go through the whole FDMI registration
+		 * cycle.
+		 * DHBA -> DPRT -> RHBA -> RPA  (physical port)
+		 * DPRT -> RPRT (vports)
+		 */
+		if (vport->port_type == LPFC_PHYSICAL_PORT)
+			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
+		else
+			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
+
+		/* Since this code path registers all the port attributes
+		 * we can just return without further checking.
+		 */
+		return;
+	}
+
 	if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
 		return;
 
+	/* Check if the number of mapped NPorts changed */
 	cnt = lpfc_find_map_node(vport);
 	if (cnt == vport->fdmi_num_disc)
 		return;
 
-	ndlp = lpfc_findnode_did(vport, FDMI_DID);
-	if (!ndlp || !NLP_CHK_NODE_ACT(ndlp))
-		return;
-
 	if (vport->port_type == LPFC_PHYSICAL_PORT) {
 		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
 			      LPFC_FDMI_PORT_ATTR_num_disc);
@@ -2515,7 +2603,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 
 	hsp = (struct serv_parm *)&vport->fc_sparam;
-	ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+	ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			  (uint32_t) hsp->cmn.bbRcvSizeLsb;
 	ae->un.AttrInt = cpu_to_be32(ae->un.AttrInt);
 	size = FOURBYTES + sizeof(uint32_t);
@@ -2556,8 +2644,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
 	ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
 	memset(ae, 0, 256);
 
-	snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
-		 init_utsname()->nodename);
+	scnprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+		  vport->phba->os_host_name);
 
 	len = strnlen(ae->un.AttrString, sizeof(ae->un.AttrString));
 	len += (len & 3) ?
(4 - (len & 3)) : 4; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index 1ee857d9d165..819335b16c2e 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -31,6 +31,7 @@ #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/ctype.h> +#include <linux/vmalloc.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -361,7 +362,7 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size) phys = ((uint64_t)hbq_buf->dbuf.phys & 0xffffffff); if (phys == le32_to_cpu(hbqe->bde.addrLow)) { len += scnprintf(buf+len, size-len, - "Buf%d: %p %06x\n", i, + "Buf%d: x%px %06x\n", i, hbq_buf->dbuf.virt, hbq_buf->tag); found = 1; break; @@ -416,8 +417,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size) qp = &phba->sli4_hba.hdwq[lpfc_debugfs_last_xripool]; len += scnprintf(buf + len, size - len, "HdwQ %d Info ", i); - spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); - spin_lock(&qp->abts_nvme_buf_list_lock); + spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); spin_lock(&qp->io_buf_list_get_lock); spin_lock(&qp->io_buf_list_put_lock); out = qp->total_io_bufs - (qp->get_io_bufs + qp->put_io_bufs + @@ -430,8 +430,7 @@ lpfc_debugfs_commonxripools_data(struct lpfc_hba *phba, char *buf, int size) qp->abts_nvme_io_bufs, out); spin_unlock(&qp->io_buf_list_put_lock); spin_unlock(&qp->io_buf_list_get_lock); - spin_unlock(&qp->abts_nvme_buf_list_lock); - spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag); + spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); lpfc_debugfs_last_xripool++; if (lpfc_debugfs_last_xripool >= phba->cfg_hdw_queue) @@ -533,9 +532,7 @@ lpfc_debugfs_multixripools_data(struct lpfc_hba *phba, char *buf, int size) continue; pbl_pool = &multixri_pool->pbl_pool; pvt_pool = &multixri_pool->pvt_pool; - txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; - if (qp->nvme_wq) - txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; scnprintf(tmp, sizeof(tmp), "%03d: %4d %4d %4d %4d | %10d %10d ", @@ -2082,8 +2079,49 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf, } #endif +static int lpfc_debugfs_ras_log_data(struct lpfc_hba *phba, + char *buffer, int size) +{ + int copied = 0; + struct lpfc_dmabuf *dmabuf, *next; + + memset(buffer, 0, size); + + spin_lock_irq(&phba->hbalock); + if (phba->ras_fwlog.state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); + return -EINVAL; + } + spin_unlock_irq(&phba->hbalock); + + list_for_each_entry_safe(dmabuf, next, + &phba->ras_fwlog.fwlog_buff_list, list) { + /* Check if copying will go over size and a '\0' char */ + if ((copied + LPFC_RAS_MAX_ENTRY_SIZE) >= (size - 1)) { + memcpy(buffer + copied, dmabuf->virt, + size - copied - 1); + copied += size - copied - 1; + break; + } + memcpy(buffer + copied, dmabuf->virt, LPFC_RAS_MAX_ENTRY_SIZE); + copied += LPFC_RAS_MAX_ENTRY_SIZE; + } + return copied; +} + +static int +lpfc_debugfs_ras_log_release(struct inode *inode, struct file *file) +{ + struct lpfc_debug *debug = file->private_data; + + vfree(debug->buffer); + kfree(debug); + + return 0; +} + /** - * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer + * lpfc_debugfs_ras_log_open - Open the RAS log debugfs buffer * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * @@ -2098,34 +2136,48 @@ lpfc_debugfs_lockstat_write(struct file *file, const char __user *buf, * error value. 
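The attribute-length update in the FDMI host-name hunk above, len += (len & 3) ? (4 - (len & 3)) : 4;, pads the string length to the next 4-byte boundary and deliberately adds a whole word when the length is already aligned, so the attribute always keeps NUL padding and the descriptor stays word-aligned. Worked through for a few lengths:

#include <stdio.h>

/* Mirrors: len += (len & 3) ? (4 - (len & 3)) : 4; */
static unsigned int fdmi_pad(unsigned int len)
{
	return len + ((len & 3) ? (4 - (len & 3)) : 4);
}

int main(void)
{
	unsigned int len;

	for (len = 5; len <= 8; len++)
		printf("len %u -> padded %u\n", len, fdmi_pad(len));
	/* 5 -> 8, 6 -> 8, 7 -> 8, and 8 -> 12 (a full extra word) */
	return 0;
}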
**/ static int -lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) +lpfc_debugfs_ras_log_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; + int size; int rc = -ENOMEM; + spin_lock_irq(&phba->hbalock); + if (phba->ras_fwlog.state != ACTIVE) { + spin_unlock_irq(&phba->hbalock); + rc = -EINVAL; + goto out; + } + spin_unlock_irq(&phba->hbalock); debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; - /* Round to page boundary */ - debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); - if (!debug->buffer) { - kfree(debug); - goto out; - } + size = LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize; + debug->buffer = vmalloc(size); + if (!debug->buffer) + goto free_debug; - debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer, - LPFC_DUMPHBASLIM_SIZE); + debug->len = lpfc_debugfs_ras_log_data(phba, debug->buffer, size); + if (debug->len < 0) { + rc = -EINVAL; + goto free_buffer; + } file->private_data = debug; - rc = 0; + return 0; + +free_buffer: + vfree(debug->buffer); +free_debug: + kfree(debug); out: return rc; } /** - * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer + * lpfc_debugfs_dumpHBASlim_open - Open the Dump HBA SLIM debugfs buffer * @inode: The inode pointer that contains a vport pointer. * @file: The file pointer to attach the log output. * @@ -2140,7 +2192,7 @@ out: * error value. **/ static int -lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) +lpfc_debugfs_dumpHBASlim_open(struct inode *inode, struct file *file) { struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; @@ -2151,44 +2203,14 @@ lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) goto out; /* Round to page boundary */ - debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); - if (!debug->buffer) { - kfree(debug); - goto out; - } - - debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer, - LPFC_DUMPHOSTSLIM_SIZE); - file->private_data = debug; - - rc = 0; -out: - return rc; -} - -static int -lpfc_debugfs_dumpData_open(struct inode *inode, struct file *file) -{ - struct lpfc_debug *debug; - int rc = -ENOMEM; - - if (!_dump_buf_data) - return -EBUSY; - - debug = kmalloc(sizeof(*debug), GFP_KERNEL); - if (!debug) - goto out; - - /* Round to page boundary */ - pr_err("9059 BLKGRD: %s: _dump_buf_data=0x%p\n", - __func__, _dump_buf_data); - debug->buffer = _dump_buf_data; + debug->buffer = kmalloc(LPFC_DUMPHBASLIM_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } - debug->len = (1 << _dump_buf_data_order) << PAGE_SHIFT; + debug->len = lpfc_debugfs_dumpHBASlim_data(phba, debug->buffer, + LPFC_DUMPHBASLIM_SIZE); file->private_data = debug; rc = 0; @@ -2196,29 +2218,41 @@ out: return rc; } +/** + * lpfc_debugfs_dumpHostSlim_open - Open the Dump Host SLIM debugfs buffer + * @inode: The inode pointer that contains a vport pointer. + * @file: The file pointer to attach the log output. + * + * Description: + * This routine is the entry point for the debugfs open file operation. It gets + * the vport from the i_private field in @inode, allocates the necessary buffer + * for the log, fills the buffer from the in-memory log for this vport, and then + * returns a pointer to that log in the private_data field in @file. + * + * Returns: + * This function returns zero if successful. On error it will return a negative + * error value. 
+ **/ static int -lpfc_debugfs_dumpDif_open(struct inode *inode, struct file *file) +lpfc_debugfs_dumpHostSlim_open(struct inode *inode, struct file *file) { + struct lpfc_hba *phba = inode->i_private; struct lpfc_debug *debug; int rc = -ENOMEM; - if (!_dump_buf_dif) - return -EBUSY; - debug = kmalloc(sizeof(*debug), GFP_KERNEL); if (!debug) goto out; /* Round to page boundary */ - pr_err("9060 BLKGRD: %s: _dump_buf_dif=0x%p file=%pD\n", - __func__, _dump_buf_dif, file); - debug->buffer = _dump_buf_dif; + debug->buffer = kmalloc(LPFC_DUMPHOSTSLIM_SIZE, GFP_KERNEL); if (!debug->buffer) { kfree(debug); goto out; } - debug->len = (1 << _dump_buf_dif_order) << PAGE_SHIFT; + debug->len = lpfc_debugfs_dumpHostSlim_data(phba, debug->buffer, + LPFC_DUMPHOSTSLIM_SIZE); file->private_data = debug; rc = 0; @@ -2227,29 +2261,6 @@ out: } static ssize_t -lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, - size_t nbytes, loff_t *ppos) -{ - /* - * The Data/DIF buffers only save one failing IO - * The write op is used as a reset mechanism after an IO has - * already been saved to the next one can be saved - */ - spin_lock(&_dump_buf_lock); - - memset((void *)_dump_buf_data, 0, - ((1 << PAGE_SHIFT) << _dump_buf_data_order)); - memset((void *)_dump_buf_dif, 0, - ((1 << PAGE_SHIFT) << _dump_buf_dif_order)); - - _dump_buf_done = 0; - - spin_unlock(&_dump_buf_lock); - - return nbytes; -} - -static ssize_t lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) { @@ -2461,17 +2472,6 @@ lpfc_debugfs_release(struct inode *inode, struct file *file) return 0; } -static int -lpfc_debugfs_dumpDataDif_release(struct inode *inode, struct file *file) -{ - struct lpfc_debug *debug = file->private_data; - - debug->buffer = NULL; - kfree(debug); - - return 0; -} - /** * lpfc_debugfs_multixripools_write - Clear multi-XRI pools statistics * @file: The file pointer to read from. 
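lpfc_debugfs_ras_log_data, earlier in these debugfs hunks, flattens the firmware-log DMA chain into one buffer and switches to a short final memcpy when the next full entry would run into the byte reserved for the trailing NUL. The same guard in standalone form (the chunk size and counts are invented; ENTRY_SZ stands in for LPFC_RAS_MAX_ENTRY_SIZE):

#include <stdio.h>
#include <string.h>

#define ENTRY_SZ 16

static int copy_log(char *out, int size, char chunks[][ENTRY_SZ], int nchunks)
{
	int copied = 0;
	int i;

	memset(out, 0, size);
	for (i = 0; i < nchunks; i++) {
		/* Would a full entry collide with the reserved '\0'? */
		if (copied + ENTRY_SZ >= size - 1) {
			memcpy(out + copied, chunks[i], size - copied - 1);
			copied += size - copied - 1;
			break;
		}
		memcpy(out + copied, chunks[i], ENTRY_SZ);
		copied += ENTRY_SZ;
	}
	return copied;
}

int main(void)
{
	char chunks[4][ENTRY_SZ];
	char out[40];
	int i, n;

	for (i = 0; i < 4; i++)
		memset(chunks[i], 'a' + i, ENTRY_SZ);

	n = copy_log(out, sizeof(out), chunks, 4);
	printf("copied %d of %zu bytes, last byte is NUL: %d\n",
	       n, sizeof(out), out[sizeof(out) - 1] == '\0');
	return 0;
}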
@@ -3786,23 +3786,13 @@ lpfc_idiag_wqs_for_cq(struct lpfc_hba *phba, char *wqtype, char *pbuffer, int qidx; for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { - qp = phba->sli4_hba.hdwq[qidx].fcp_wq; + qp = phba->sli4_hba.hdwq[qidx].io_wq; if (qp->assoc_qid != cq_id) continue; *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); if (*len >= max_cnt) return 1; } - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { - qp = phba->sli4_hba.hdwq[qidx].nvme_wq; - if (qp->assoc_qid != cq_id) - continue; - *len = __lpfc_idiag_print_wq(qp, wqtype, pbuffer, *len); - if (*len >= max_cnt) - return 1; - } - } return 0; } @@ -3868,9 +3858,9 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, struct lpfc_queue *qp; int rc; - qp = phba->sli4_hba.hdwq[eqidx].fcp_cq; + qp = phba->sli4_hba.hdwq[eqidx].io_cq; - *len = __lpfc_idiag_print_cq(qp, "FCP", pbuffer, *len); + *len = __lpfc_idiag_print_cq(qp, "IO", pbuffer, *len); /* Reset max counter */ qp->CQ_max_cqe = 0; @@ -3878,28 +3868,11 @@ lpfc_idiag_cqs_for_eq(struct lpfc_hba *phba, char *pbuffer, if (*len >= max_cnt) return 1; - rc = lpfc_idiag_wqs_for_cq(phba, "FCP", pbuffer, len, + rc = lpfc_idiag_wqs_for_cq(phba, "IO", pbuffer, len, max_cnt, qp->queue_id); if (rc) return 1; - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - qp = phba->sli4_hba.hdwq[eqidx].nvme_cq; - - *len = __lpfc_idiag_print_cq(qp, "NVME", pbuffer, *len); - - /* Reset max counter */ - qp->CQ_max_cqe = 0; - - if (*len >= max_cnt) - return 1; - - rc = lpfc_idiag_wqs_for_cq(phba, "NVME", pbuffer, len, - max_cnt, qp->queue_id); - if (rc) - return 1; - } - if ((eqidx < phba->cfg_nvmet_mrq) && phba->nvmet_support) { /* NVMET CQset */ qp = phba->sli4_hba.nvmet_cqset[eqidx]; @@ -4348,7 +4321,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, if (phba->sli4_hba.hdwq) { for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { - qp = phba->sli4_hba.hdwq[qidx].fcp_cq; + qp = phba->sli4_hba.hdwq[qidx].io_cq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( @@ -4360,22 +4333,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, } } } - /* NVME complete queue */ - if (phba->sli4_hba.hdwq) { - qidx = 0; - do { - qp = phba->sli4_hba.hdwq[qidx].nvme_cq; - if (qp && qp->queue_id == queid) { - /* Sanity check */ - rc = lpfc_idiag_que_param_check( - qp, index, count); - if (rc) - goto error_out; - idiag.ptr_private = qp; - goto pass_check; - } - } while (++qidx < phba->cfg_hdw_queue); - } goto error_out; break; case LPFC_IDIAG_MQ: @@ -4419,20 +4376,7 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf, if (phba->sli4_hba.hdwq) { /* FCP/SCSI work queue */ for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { - qp = phba->sli4_hba.hdwq[qidx].fcp_wq; - if (qp && qp->queue_id == queid) { - /* Sanity check */ - rc = lpfc_idiag_que_param_check( - qp, index, count); - if (rc) - goto error_out; - idiag.ptr_private = qp; - goto pass_check; - } - } - /* NVME work queue */ - for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { - qp = phba->sli4_hba.hdwq[qidx].nvme_wq; + qp = phba->sli4_hba.hdwq[qidx].io_wq; if (qp && qp->queue_id == queid) { /* Sanity check */ rc = lpfc_idiag_que_param_check( @@ -5440,6 +5384,15 @@ static const struct file_operations lpfc_debugfs_op_lockstat = { }; #endif +#undef lpfc_debugfs_ras_log +static const struct file_operations lpfc_debugfs_ras_log = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_ras_log_open, + .llseek = 
lpfc_debugfs_lseek, + .read = lpfc_debugfs_read, + .release = lpfc_debugfs_ras_log_release, +}; + #undef lpfc_debugfs_op_dumpHBASlim static const struct file_operations lpfc_debugfs_op_dumpHBASlim = { .owner = THIS_MODULE, @@ -5508,26 +5461,6 @@ static const struct file_operations lpfc_debugfs_op_cpucheck = { .release = lpfc_debugfs_release, }; -#undef lpfc_debugfs_op_dumpData -static const struct file_operations lpfc_debugfs_op_dumpData = { - .owner = THIS_MODULE, - .open = lpfc_debugfs_dumpData_open, - .llseek = lpfc_debugfs_lseek, - .read = lpfc_debugfs_read, - .write = lpfc_debugfs_dumpDataDif_write, - .release = lpfc_debugfs_dumpDataDif_release, -}; - -#undef lpfc_debugfs_op_dumpDif -static const struct file_operations lpfc_debugfs_op_dumpDif = { - .owner = THIS_MODULE, - .open = lpfc_debugfs_dumpDif_open, - .llseek = lpfc_debugfs_lseek, - .read = lpfc_debugfs_read, - .write = lpfc_debugfs_dumpDataDif_write, - .release = lpfc_debugfs_dumpDataDif_release, -}; - #undef lpfc_debugfs_op_dif_err static const struct file_operations lpfc_debugfs_op_dif_err = { .owner = THIS_MODULE, @@ -5630,7 +5563,6 @@ static const struct file_operations lpfc_idiag_op_extAcc = { .write = lpfc_idiag_extacc_write, .release = lpfc_idiag_cmd_release, }; - #endif /* lpfc_idiag_mbxacc_dump_bsg_mbox - idiag debugfs dump bsg mailbox command @@ -5881,6 +5813,19 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } + /* RAS log */ + snprintf(name, sizeof(name), "ras_log"); + phba->debug_ras_log = + debugfs_create_file(name, 0644, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_ras_log); + if (!phba->debug_ras_log) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "6148 Cannot create debugfs" + " ras_log\n"); + goto debug_failed; + } + /* Setup hbqinfo */ snprintf(name, sizeof(name), "hbqinfo"); phba->debug_hbqinfo = @@ -5924,20 +5869,6 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) } else phba->debug_dumpHostSlim = NULL; - /* Setup dumpData */ - snprintf(name, sizeof(name), "dumpData"); - phba->debug_dumpData = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpData); - - /* Setup dumpDif */ - snprintf(name, sizeof(name), "dumpDif"); - phba->debug_dumpDif = - debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, - phba->hba_debugfs_root, - phba, &lpfc_debugfs_op_dumpDif); - /* Setup DIF Error Injections */ snprintf(name, sizeof(name), "InjErrLBA"); phba->debug_InjErrLBA = @@ -6305,6 +6236,9 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_hbqinfo); /* hbqinfo */ phba->debug_hbqinfo = NULL; + debugfs_remove(phba->debug_ras_log); + phba->debug_ras_log = NULL; + #ifdef LPFC_HDWQ_LOCK_STAT debugfs_remove(phba->debug_lockstat); /* lockstat */ phba->debug_lockstat = NULL; @@ -6315,12 +6249,6 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_dumpHostSlim); /* HostSlim */ phba->debug_dumpHostSlim = NULL; - debugfs_remove(phba->debug_dumpData); /* dumpData */ - phba->debug_dumpData = NULL; - - debugfs_remove(phba->debug_dumpDif); /* dumpDif */ - phba->debug_dumpDif = NULL; - debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ phba->debug_InjErrLBA = NULL; @@ -6442,12 +6370,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) lpfc_debug_dump_wq(phba, DUMP_NVMELS, 0); for (idx = 0; idx < phba->cfg_hdw_queue; idx++) - lpfc_debug_dump_wq(phba, DUMP_FCP, idx); - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - for (idx = 0; idx < phba->cfg_hdw_queue; idx++) - 
lpfc_debug_dump_wq(phba, DUMP_NVME, idx); - } + lpfc_debug_dump_wq(phba, DUMP_IO, idx); lpfc_debug_dump_hdr_rq(phba); lpfc_debug_dump_dat_rq(phba); @@ -6459,12 +6382,7 @@ lpfc_debug_dump_all_queues(struct lpfc_hba *phba) lpfc_debug_dump_cq(phba, DUMP_NVMELS, 0); for (idx = 0; idx < phba->cfg_hdw_queue; idx++) - lpfc_debug_dump_cq(phba, DUMP_FCP, idx); - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - for (idx = 0; idx < phba->cfg_hdw_queue; idx++) - lpfc_debug_dump_cq(phba, DUMP_NVME, idx); - } + lpfc_debug_dump_cq(phba, DUMP_IO, idx); /* * Dump Event Queues (EQs) diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h index 34070874616d..20f2537af511 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.h +++ b/drivers/scsi/lpfc/lpfc_debugfs.h @@ -291,8 +291,7 @@ struct lpfc_idiag { #define LPFC_DUMP_MULTIXRIPOOL_SIZE 8192 enum { - DUMP_FCP, - DUMP_NVME, + DUMP_IO, DUMP_MBX, DUMP_ELS, DUMP_NVMELS, @@ -415,12 +414,9 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) struct lpfc_queue *wq; char *qtypestr; - if (qtype == DUMP_FCP) { - wq = phba->sli4_hba.hdwq[wqidx].fcp_wq; - qtypestr = "FCP"; - } else if (qtype == DUMP_NVME) { - wq = phba->sli4_hba.hdwq[wqidx].nvme_wq; - qtypestr = "NVME"; + if (qtype == DUMP_IO) { + wq = phba->sli4_hba.hdwq[wqidx].io_wq; + qtypestr = "IO"; } else if (qtype == DUMP_MBX) { wq = phba->sli4_hba.mbx_wq; qtypestr = "MBX"; @@ -433,7 +429,7 @@ lpfc_debug_dump_wq(struct lpfc_hba *phba, int qtype, int wqidx) } else return; - if (qtype == DUMP_FCP || qtype == DUMP_NVME) + if (qtype == DUMP_IO) pr_err("%s WQ: WQ[Idx:%d|Qid:%d]\n", qtypestr, wqidx, wq->queue_id); else @@ -459,17 +455,13 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) char *qtypestr; int eqidx; - /* fcp/nvme wq and cq are 1:1, thus same indexes */ + /* io wq and cq are 1:1, thus same indexes */ eq = NULL; - if (qtype == DUMP_FCP) { - wq = phba->sli4_hba.hdwq[wqidx].fcp_wq; - cq = phba->sli4_hba.hdwq[wqidx].fcp_cq; - qtypestr = "FCP"; - } else if (qtype == DUMP_NVME) { - wq = phba->sli4_hba.hdwq[wqidx].nvme_wq; - cq = phba->sli4_hba.hdwq[wqidx].nvme_cq; - qtypestr = "NVME"; + if (qtype == DUMP_IO) { + wq = phba->sli4_hba.hdwq[wqidx].io_wq; + cq = phba->sli4_hba.hdwq[wqidx].io_cq; + qtypestr = "IO"; } else if (qtype == DUMP_MBX) { wq = phba->sli4_hba.mbx_wq; cq = phba->sli4_hba.mbx_cq; @@ -496,7 +488,7 @@ lpfc_debug_dump_cq(struct lpfc_hba *phba, int qtype, int wqidx) eq = phba->sli4_hba.hdwq[0].hba_eq; } - if (qtype == DUMP_FCP || qtype == DUMP_NVME) + if (qtype == DUMP_IO) pr_err("%s CQ: WQ[Idx:%d|Qid%d]->CQ[Idx%d|Qid%d]" "->EQ[Idx:%d|Qid:%d]:\n", qtypestr, wqidx, wq->queue_id, wqidx, cq->queue_id, @@ -572,20 +564,11 @@ lpfc_debug_dump_wq_by_id(struct lpfc_hba *phba, int qid) int wq_idx; for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++) - if (phba->sli4_hba.hdwq[wq_idx].fcp_wq->queue_id == qid) + if (phba->sli4_hba.hdwq[wq_idx].io_wq->queue_id == qid) break; if (wq_idx < phba->cfg_hdw_queue) { - pr_err("FCP WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].fcp_wq); - return; - } - - for (wq_idx = 0; wq_idx < phba->cfg_hdw_queue; wq_idx++) - if (phba->sli4_hba.hdwq[wq_idx].nvme_wq->queue_id == qid) - break; - if (wq_idx < phba->cfg_hdw_queue) { - pr_err("NVME WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].nvme_wq); + pr_err("IO WQ[Idx:%d|Qid:%d]\n", wq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[wq_idx].io_wq); return; } @@ -654,22 +637,12 @@ 
lpfc_debug_dump_cq_by_id(struct lpfc_hba *phba, int qid) int cq_idx; for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++) - if (phba->sli4_hba.hdwq[cq_idx].fcp_cq->queue_id == qid) - break; - - if (cq_idx < phba->cfg_hdw_queue) { - pr_err("FCP CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].fcp_cq); - return; - } - - for (cq_idx = 0; cq_idx < phba->cfg_hdw_queue; cq_idx++) - if (phba->sli4_hba.hdwq[cq_idx].nvme_cq->queue_id == qid) + if (phba->sli4_hba.hdwq[cq_idx].io_cq->queue_id == qid) break; if (cq_idx < phba->cfg_hdw_queue) { - pr_err("NVME CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); - lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].nvme_cq); + pr_err("IO CQ[Idx:%d|Qid:%d]\n", cq_idx, qid); + lpfc_debug_dump_q(phba->sli4_hba.hdwq[cq_idx].io_cq); return; } diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index 1c89c9f314fa..482e4a888dae 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -112,6 +112,8 @@ struct lpfc_nodelist { uint8_t nlp_retry; /* used for ELS retries */ uint8_t nlp_fcp_info; /* class info, bits 0-3 */ #define NLP_FCP_2_DEVICE 0x10 /* FCP-2 device */ + u8 nlp_nvme_info; /* NVME NSLER Support */ +#define NLP_NVME_NSLER 0x1 /* NVME NSLER device */ uint16_t nlp_usg_map; /* ndlp management usage bitmap */ #define NLP_USG_NODE_ACT_BIT 0x1 /* Indicate ndlp is actively used */ @@ -157,6 +159,7 @@ struct lpfc_node_rrq { /* Defines for nlp_flag (uint32) */ #define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login */ #define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ +#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */ #define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ #define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ #define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index f12780f4cfbb..42a2bf38eaea 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -1052,17 +1052,18 @@ stop_rr_fcf_flogi: if (lpfc_els_retry(phba, cmdiocb, rspiocb)) goto out; + lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, + "0150 FLOGI failure Status:x%x/x%x " + "xri x%x TMO:x%x\n", + irsp->ulpStatus, irsp->un.ulpWord[4], + cmdiocb->sli4_xritag, irsp->ulpTimeout); + /* If this is not a loop open failure, bail out */ if (!(irsp->ulpStatus == IOSTAT_LOCAL_REJECT && ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) == IOERR_LOOP_OPEN_FAILURE))) goto flogifail; - lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS, - "0150 FLOGI failure Status:x%x/x%x xri x%x TMO:x%x\n", - irsp->ulpStatus, irsp->un.ulpWord[4], - cmdiocb->sli4_xritag, irsp->ulpTimeout); - /* FLOGI failed, so there is no fabric */ spin_lock_irq(shost->host_lock); vport->fc_flag &= ~(FC_FABRIC | FC_PUBLIC_LOOP); @@ -1207,6 +1208,39 @@ out: } /** + * lpfc_cmpl_els_link_down - Completion callback function for ELS command + * aborted during a link down + * @phba: pointer to lpfc hba data structure. + * @cmdiocb: pointer to lpfc command iocb data structure. + * @rspiocb: pointer to lpfc response iocb data structure. 
+ * + */ +static void +lpfc_cmpl_els_link_down(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, + struct lpfc_iocbq *rspiocb) +{ + IOCB_t *irsp; + uint32_t *pcmd; + uint32_t cmd; + + pcmd = (uint32_t *)(((struct lpfc_dmabuf *)cmdiocb->context2)->virt); + cmd = *pcmd; + irsp = &rspiocb->iocb; + + lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + "6445 ELS completes after LINK_DOWN: " + " Status %x/%x cmd x%x flg x%x\n", + irsp->ulpStatus, irsp->un.ulpWord[4], cmd, + cmdiocb->iocb_flag); + + if (cmdiocb->iocb_flag & LPFC_IO_FABRIC) { + cmdiocb->iocb_flag &= ~LPFC_IO_FABRIC; + atomic_dec(&phba->fabric_iocb_count); + } + lpfc_els_free_iocb(phba, cmdiocb); +} + +/** * lpfc_issue_els_flogi - Issue an flogi iocb command for a vport * @vport: pointer to a host virtual N_Port data structure. * @ndlp: pointer to a node-list data structure. @@ -2107,7 +2141,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) !(vport->fc_flag & FC_OFFLINE_MODE)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4110 Issue PLOGI x%x deferred " - "on NPort x%x rpi x%x Data: %p\n", + "on NPort x%x rpi x%x Data: x%px\n", ndlp->nlp_defer_did, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp); @@ -2202,6 +2236,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct Scsi_Host *shost = lpfc_shost_from_vport(vport); IOCB_t *irsp; struct lpfc_nodelist *ndlp; + char *mode; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->context_un.rsp_iocb = rspiocb; @@ -2239,8 +2274,17 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; } + /* If we don't send GFT_ID to Fabric, a PRLI error + * could be expected. + */ + if ((vport->fc_flag & FC_FABRIC) || + (vport->cfg_enable_fc4_type != LPFC_ENABLE_BOTH)) + mode = KERN_ERR; + else + mode = KERN_INFO; + /* PRLI failed */ - lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, + lpfc_printf_vlog(vport, mode, LOG_ELS, "2754 PRLI failure DID:%06X Status:x%x/x%x, " "data: x%x\n", ndlp->nlp_DID, irsp->ulpStatus, @@ -2401,6 +2445,10 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, npr_nvme = (struct lpfc_nvme_prli *)pcmd; bf_set(prli_type_code, npr_nvme, PRLI_NVME_TYPE); bf_set(prli_estabImagePair, npr_nvme, 0); /* Should be 0 */ + if (phba->nsler) { + bf_set(prli_nsler, npr_nvme, 1); + bf_set(prli_conf, npr_nvme, 1); + } /* Only initiators request first burst. 
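The new lpfc_cmpl_els_link_down completion above exists so that ELS commands flushed during a link-down event are torn down without running retry logic; lpfc_els_flush_cmd, later in this file, repoints iocb_cmpl at it while the link is down. Stripped to the function-pointer swap, with invented types:

#include <stdio.h>

struct iocb;
typedef void (*cmpl_fn)(struct iocb *);

struct iocb {
	cmpl_fn cmpl;
	int retries;
};

static void cmpl_normal(struct iocb *io)
{
	/* Normal path: inspect status and possibly retry the command. */
	printf("normal completion, may retry (%d left)\n", io->retries);
}

static void cmpl_link_down(struct iocb *io)
{
	/* Link-down path: no retries, just release resources. */
	(void)io;
	printf("link-down completion, freeing iocb\n");
}

int main(void)
{
	struct iocb io = { .cmpl = cmpl_normal, .retries = 3 };
	int link_up = 0;

	if (!link_up)		/* the flush path repoints the completion */
		io.cmpl = cmpl_link_down;
	io.cmpl(&io);
	return 0;
}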
*/ if ((phba->cfg_nvme_enable_fb) && @@ -4203,7 +4251,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) mempool_free(pmb, phba->mbox_mem_pool); if (ndlp) { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0006 rpi%x DID:%x flg:%x %d map:%x %p\n", + "0006 rpi%x DID:%x flg:%x %d map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp); @@ -4253,6 +4301,11 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, irsp = &rspiocb->iocb; + if (!vport) { + lpfc_printf_log(phba, KERN_ERR, LOG_ELS, + "3177 ELS response failed\n"); + goto out; + } if (cmdiocb->context_un.mbox) mbox = cmdiocb->context_un.mbox; @@ -4392,7 +4445,7 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, mempool_free(mbox, phba->mbox_mem_pool); } out: - if (ndlp && NLP_CHK_NODE_ACT(ndlp)) { + if (ndlp && NLP_CHK_NODE_ACT(ndlp) && shost) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag &= ~(NLP_ACC_REGLOGIN | NLP_RM_DFLT_RPI); spin_unlock_irq(shost->host_lock); @@ -5222,6 +5275,11 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) } } } + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6452 Discover PLOGI %d flag x%x\n", + sentplogi, vport->fc_flag); + if (sentplogi) { lpfc_set_disctmo(vport); } @@ -5634,16 +5692,16 @@ lpfc_rdp_res_attach_port_names(struct fc_rdp_port_name_desc *desc, desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG); if (vport->fc_flag & FC_FABRIC) { memcpy(desc->port_names.wwnn, &vport->fabric_nodename, - sizeof(desc->port_names.wwnn)); + sizeof(desc->port_names.wwnn)); memcpy(desc->port_names.wwpn, &vport->fabric_portname, - sizeof(desc->port_names.wwpn)); + sizeof(desc->port_names.wwpn)); } else { /* Point to Point */ memcpy(desc->port_names.wwnn, &ndlp->nlp_nodename, - sizeof(desc->port_names.wwnn)); + sizeof(desc->port_names.wwnn)); - memcpy(desc->port_names.wwnn, &ndlp->nlp_portname, - sizeof(desc->port_names.wwpn)); + memcpy(desc->port_names.wwpn, &ndlp->nlp_portname, + sizeof(desc->port_names.wwpn)); } desc->length = cpu_to_be32(sizeof(desc->port_names)); @@ -6327,7 +6385,11 @@ lpfc_rscn_recovery_check(struct lpfc_vport *vport) continue; } - if (ndlp->nlp_fc4_type & NLP_FC4_NVME) + /* Check to see if we need to NVME rescan this target + * remoteport. + */ + if (ndlp->nlp_fc4_type & NLP_FC4_NVME && + ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) lpfc_nvme_rescan_port(vport, ndlp); lpfc_disc_state_machine(vport, ndlp, NULL, @@ -6413,7 +6475,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, uint32_t payload_len, length, nportid, *cmd; int rscn_cnt; int rscn_id = 0, hba_id = 0; - int i; + int i, tmo; pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; lp = (uint32_t *) pcmd->virt; @@ -6441,7 +6503,11 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, *lp, vport->fc_flag, payload_len); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if (ndlp->nlp_fc4_type & NLP_FC4_NVME) + /* Check to see if we need to NVME rescan this target + * remoteport. 
+ */ + if (ndlp->nlp_fc4_type & NLP_FC4_NVME && + ndlp->nlp_type & (NLP_NVME_TARGET | NLP_NVME_DISCOVERY)) lpfc_nvme_rescan_port(vport, ndlp); return 0; } @@ -6515,6 +6581,13 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, spin_lock_irq(shost->host_lock); vport->fc_flag |= FC_RSCN_DEFERRED; + + /* Restart disctmo if its already running */ + if (vport->fc_flag & FC_DISC_TMO) { + tmo = ((phba->fc_ratov * 3) + 3); + mod_timer(&vport->fc_disctmo, + jiffies + msecs_to_jiffies(1000 * tmo)); + } if ((rscn_cnt < FC_MAX_HOLD_RSCN) && !(vport->fc_flag & FC_RSCN_DISCOVERY)) { vport->fc_flag |= FC_RSCN_MODE; @@ -6617,9 +6690,10 @@ lpfc_els_handle_rscn(struct lpfc_vport *vport) /* RSCN processed */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0215 RSCN processed Data: x%x x%x x%x x%x\n", + "0215 RSCN processed Data: x%x x%x x%x x%x x%x x%x\n", vport->fc_flag, 0, vport->fc_rscn_id_cnt, - vport->port_state); + vport->port_state, vport->num_disc_nodes, + vport->gidft_inp); /* To process RSCN, first compare RSCN data with NameServer */ vport->fc_ns_retry = 0; @@ -7940,53 +8014,83 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) struct lpfc_sli_ring *pring; struct lpfc_iocbq *tmp_iocb, *piocb; IOCB_t *cmd = NULL; + unsigned long iflags = 0; lpfc_fabric_abort_vport(vport); + /* * For SLI3, only the hbalock is required. But SLI4 needs to coordinate * with the ring insert operation. Because lpfc_sli_issue_abort_iotag * ultimately grabs the ring_lock, the driver must splice the list into * a working list and release the locks before calling the abort. */ - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); pring = lpfc_phba_elsring(phba); /* Bail out if we've no ELS wq, like in PCI error recovery case. */ if (unlikely(!pring)) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); return; } if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); + /* First we need to issue aborts to outstanding cmds on txcmpl */ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txcmplq, list) { if (piocb->iocb_flag & LPFC_IO_LIBDFC) continue; if (piocb->vport != vport) continue; - list_add_tail(&piocb->dlist, &abort_list); + + if (piocb->iocb_flag & LPFC_DRIVER_ABORTED) + continue; + + /* On the ELS ring we can have ELS_REQUESTs or + * GEN_REQUESTs waiting for a response. + */ + cmd = &piocb->iocb; + if (cmd->ulpCommand == CMD_ELS_REQUEST64_CR) { + list_add_tail(&piocb->dlist, &abort_list); + + /* If the link is down when flushing ELS commands + * the firmware will not complete them till after + * the link comes back up. This may confuse + * discovery for the new link up, so we need to + * change the compl routine to just clean up the iocb + * and avoid any retry logic. + */ + if (phba->link_state == LPFC_LINK_DOWN) + piocb->iocb_cmpl = lpfc_cmpl_els_link_down; + } + if (cmd->ulpCommand == CMD_GEN_REQUEST64_CR) + list_add_tail(&piocb->dlist, &abort_list); } + if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring->ring_lock); - spin_unlock_irq(&phba->hbalock); - /* Abort each iocb on the aborted list and remove the dlist links. */ + spin_unlock_irqrestore(&phba->hbalock, iflags); + + /* Abort each txcmpl iocb on aborted list and remove the dlist links. 
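Because lpfc_sli_issue_abort_iotag ultimately takes the ring_lock itself, lpfc_els_flush_cmd above splices the matching iocbs onto a private abort_list while holding the locks and only afterwards walks that list to issue the aborts. A userspace model of the splice-under-lock idiom using a pthread mutex and an intrusive list (all names invented):

#include <pthread.h>
#include <stdio.h>

struct ent { int id; struct ent *next; };

static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ent e2 = { 2, NULL }, e1 = { 1, &e2 };
static struct ent *txq = &e1;		/* shared, lock-protected */

static void abort_entry(struct ent *e)	/* may re-take locks internally */
{
	printf("aborting iocb %d outside the lock\n", e->id);
}

int main(void)
{
	struct ent *abort_list;

	/* Splice the whole queue onto a private list under the lock... */
	pthread_mutex_lock(&ring_lock);
	abort_list = txq;
	txq = NULL;
	pthread_mutex_unlock(&ring_lock);

	/* ...then process it with no locks held. */
	while (abort_list) {
		struct ent *e = abort_list;

		abort_list = e->next;
		abort_entry(e);
	}
	return 0;
}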
*/ list_for_each_entry_safe(piocb, tmp_iocb, &abort_list, dlist) { - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); list_del_init(&piocb->dlist); lpfc_sli_issue_abort_iotag(phba, pring, piocb); - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); } if (!list_empty(&abort_list)) lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS, "3387 abort list for txq not empty\n"); INIT_LIST_HEAD(&abort_list); - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); if (phba->sli_rev == LPFC_SLI_REV4) spin_lock(&pring->ring_lock); + /* No need to abort the txq list, + * just queue them up for lpfc_sli_cancel_iocbs + */ list_for_each_entry_safe(piocb, tmp_iocb, &pring->txq, list) { cmd = &piocb->iocb; @@ -8007,11 +8111,22 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport) list_del_init(&piocb->list); list_add_tail(&piocb->list, &abort_list); } + + /* The same holds true for any FLOGI/FDISC on the fabric_iocb_list */ + if (vport == phba->pport) { + list_for_each_entry_safe(piocb, tmp_iocb, + &phba->fabric_iocb_list, list) { + cmd = &piocb->iocb; + list_del_init(&piocb->list); + list_add_tail(&piocb->list, &abort_list); + } + } + if (phba->sli_rev == LPFC_SLI_REV4) spin_unlock(&pring->ring_lock); - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); - /* Cancell all the IOCBs from the completions list */ + /* Cancel all the IOCBs from the completions list */ lpfc_sli_cancel_iocbs(phba, &abort_list, IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED); diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 28ecaa7fc715..dcc8999c6a68 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -28,6 +28,7 @@ #include <linux/kthread.h> #include <linux/interrupt.h> #include <linux/lockdep.h> +#include <linux/utsname.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -118,6 +119,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_work_evt *evtp; int put_node; int put_rport; + unsigned long iflags; rdata = rport->dd_data; ndlp = rdata->pnode; @@ -132,7 +134,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3181 dev_loss_callbk x%06x, rport %p flg x%x\n", + "3181 dev_loss_callbk x%06x, rport x%px flg x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); /* Don't defer this if we are in the process of deleting the vport @@ -170,22 +172,22 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) } shost = lpfc_shost_from_vport(vport); - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, iflags); ndlp->nlp_flag |= NLP_IN_DEV_LOSS; - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, iflags); /* We need to hold the node by incrementing the reference * count until this queued work is done */ evtp->evt_arg1 = lpfc_nlp_get(ndlp); - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); if (evtp->evt_arg1) { evtp->evt = LPFC_EVT_DEV_LOSS; list_add_tail(&evtp->evt_listp, &phba->work_list); lpfc_worker_wake_up(phba); } - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); return; } @@ -212,14 +214,15 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) int put_node; int warn_on = 0; int fcf_inuse = 0; + unsigned long iflags; rport = ndlp->rport; vport = ndlp->vport; shost = lpfc_shost_from_vport(vport); - spin_lock_irq(shost->host_lock); + 
spin_lock_irqsave(shost->host_lock, iflags); ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, iflags); if (!rport) return fcf_inuse; @@ -235,7 +238,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3182 dev_loss_tmo_handler x%06x, rport %p flg x%x\n", + "3182 dev_loss_tmo_handler x%06x, rport x%px flg x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag); /* @@ -698,7 +701,10 @@ lpfc_work_done(struct lpfc_hba *phba) if (!(phba->hba_flag & HBA_SP_QUEUE_EVT)) set_bit(LPFC_DATA_READY, &phba->data_flags); } else { - if (phba->link_state >= LPFC_LINK_UP || + /* Driver could have abort request completed in queue + * when link goes down. Allow for this transition. + */ + if (phba->link_state >= LPFC_LINK_DOWN || phba->link_flag & LS_MDS_LOOPBACK) { pring->flag &= ~LPFC_DEFERRED_RING_EVENT; lpfc_sli_handle_slow_ring_event(phba, pring, @@ -903,6 +909,8 @@ lpfc_linkdown(struct lpfc_hba *phba) phba->trunk_link.link1.state = 0; phba->trunk_link.link2.state = 0; phba->trunk_link.link3.state = 0; + phba->sli4_hba.link_state.logical_speed = + LPFC_LINK_SPEED_UNKNOWN; } spin_lock_irq(shost->host_lock); phba->pport->fc_flag &= ~FC_LBIT; @@ -1131,7 +1139,6 @@ void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; - uint8_t bbscn = 0; if (pmb->u.mb.mbxStatus) goto out; @@ -1158,17 +1165,11 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Start discovery by sending a FLOGI. port_state is identically * LPFC_FLOGI while waiting for FLOGI cmpl */ - if (vport->port_state != LPFC_FLOGI) { - if (phba->bbcredit_support && phba->cfg_enable_bbcr) { - bbscn = bf_get(lpfc_bbscn_def, - &phba->sli4_hba.bbscn_params); - vport->fc_sparam.cmn.bbRcvSizeMsb &= 0xf; - vport->fc_sparam.cmn.bbRcvSizeMsb |= (bbscn << 4); - } + if (vport->port_state != LPFC_FLOGI) lpfc_initial_flogi(vport); - } else if (vport->fc_flag & FC_PT2PT) { + else if (vport->fc_flag & FC_PT2PT) lpfc_disc_start(vport); - } + return; out: @@ -3115,8 +3116,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) int rc; struct fcf_record *fcf_record; uint32_t fc_flags = 0; + unsigned long iflags; - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); phba->fc_linkspeed = bf_get(lpfc_mbx_read_top_link_spd, la); if (!(phba->hba_flag & HBA_FCOE_MODE)) { @@ -3213,12 +3215,12 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) vport->fc_myDID = phba->fc_pref_DID; fc_flags |= FC_LBIT; } - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); if (fc_flags) { - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, iflags); vport->fc_flag |= fc_flags; - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, iflags); } lpfc_linkup(phba); @@ -3292,33 +3294,37 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) * The driver is expected to do FIP/FCF. Call the port * and get the FCF Table. 
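Most of the lpfc_hbadisc.c hunks in this stretch are mechanical spin_lock_irq() to spin_lock_irqsave() conversions. The distinction matters once a function can be entered with interrupts already disabled: spin_unlock_irq() unconditionally re-enables interrupts, while the irqsave/irqrestore pair records the caller's interrupt state and restores exactly that. The safe form in isolation:

    #include <linux/spinlock.h>

    static void ex_set_flag(spinlock_t *lock, unsigned long *word,
                            unsigned long bit)
    {
            unsigned long iflags;   /* one local per locking scope */

            spin_lock_irqsave(lock, iflags);
            *word |= bit;
            /* restores the saved state; never forces interrupts on */
            spin_unlock_irqrestore(lock, iflags);
    }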
*/ - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); if (phba->hba_flag & FCF_TS_INPROG) { - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); return; } /* This is the initial FCF discovery scan */ phba->fcf.fcf_flag |= FCF_INIT_DISC; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY, "2778 Start FCF table scan at linkup\n"); rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST); if (rc) { - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); phba->fcf.fcf_flag &= ~FCF_INIT_DISC; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); goto out; } /* Reset FCF roundrobin bmask for new discovery */ lpfc_sli4_clear_fcf_rr_bmask(phba); } + /* Prepare for LINK up registrations */ + memset(phba->os_host_name, 0, sizeof(phba->os_host_name)); + scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s", + init_utsname()->nodename); return; out: lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, - "0263 Discovery Mailbox error: state: 0x%x : %p %p\n", + "0263 Discovery Mailbox error: state: 0x%x : x%px x%px\n", vport->port_state, sparam_mbox, cfglink_mbox); lpfc_issue_clear_la(phba, vport); return; @@ -3366,6 +3372,7 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) MAILBOX_t *mb = &pmb->u.mb; struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *)(pmb->ctx_buf); uint8_t attn_type; + unsigned long iflags; /* Unblock ELS traffic */ pring = lpfc_phba_elsring(phba); @@ -3387,12 +3394,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) memcpy(&phba->alpa_map[0], mp->virt, 128); - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, iflags); if (bf_get(lpfc_mbx_read_top_pb, la)) vport->fc_flag |= FC_BYPASSED_MODE; else vport->fc_flag &= ~FC_BYPASSED_MODE; - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, iflags); if (phba->fc_eventTag <= la->eventTag) { phba->fc_stat.LinkMultiEvent++; @@ -3403,12 +3410,12 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) phba->fc_eventTag = la->eventTag; if (phba->sli_rev < LPFC_SLI_REV4) { - spin_lock_irq(&phba->hbalock); + spin_lock_irqsave(&phba->hbalock, iflags); if (bf_get(lpfc_mbx_read_top_mm, la)) phba->sli.sli_flag |= LPFC_MENLO_MAINT; else phba->sli.sli_flag &= ~LPFC_MENLO_MAINT; - spin_unlock_irq(&phba->hbalock); + spin_unlock_irqrestore(&phba->hbalock, iflags); } phba->link_events++; @@ -3450,8 +3457,8 @@ lpfc_mbx_cmpl_read_topology(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) phba->pport->port_state, vport->fc_flag); else if (attn_type == LPFC_ATT_UNEXP_WWPN) lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT, - "1313 Link Down UNEXP WWPN Event x%x received " - "Data: x%x x%x x%x x%x x%x\n", + "1313 Link Down Unexpected FA WWPN Event x%x " + "received Data: x%x x%x x%x x%x x%x\n", la->eventTag, phba->fc_eventTag, phba->pport->port_state, vport->fc_flag, bf_get(lpfc_mbx_read_top_mm, la), @@ -3529,7 +3536,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->ctx_ndlp = NULL; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, - "0002 rpi:%x DID:%x flg:%x %d map:%x %p\n", + "0002 rpi:%x DID:%x flg:%x %d map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp); @@ -4040,8 +4047,8 @@ out: ndlp->nlp_flag |= NLP_RPI_REGISTERED; 
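The link-up hunk above snapshots the OS node name once per link event so later registrations can report it. scnprintf() bounds the copy and always NUL-terminates, so an oversized utsname cannot overrun the fixed os_host_name buffer. The same capture against an arbitrary caller-supplied buffer:

    #include <linux/kernel.h>
    #include <linux/string.h>
    #include <linux/utsname.h>

    static void ex_capture_host_name(char *buf, size_t len)
    {
            memset(buf, 0, len);
            /* writes at most len bytes, including the terminating NUL */
            scnprintf(buf, len, "%s", init_utsname()->nodename);
    }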
ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, - "0003 rpi:%x DID:%x flg:%x %d map%x %p\n", + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, + "0003 rpi:%x DID:%x flg:%x %d map%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp); @@ -4160,7 +4167,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) fc_remote_port_rolechg(rport, rport_ids.roles); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3183 rport register x%06x, rport %p role x%x\n", + "3183 rport register x%06x, rport x%px role x%x\n", ndlp->nlp_DID, rport, rport_ids.roles); if ((rport->scsi_target_id != -1) && @@ -4184,7 +4191,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "3184 rport unregister x%06x, rport %p\n", + "3184 rport unregister x%06x, rport x%px\n", ndlp->nlp_DID, rport); fc_remote_port_delete(rport); @@ -4196,8 +4203,9 @@ static void lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); + unsigned long iflags; - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, iflags); switch (state) { case NLP_STE_UNUSED_NODE: vport->fc_unused_cnt += count; @@ -4227,7 +4235,7 @@ lpfc_nlp_counters(struct lpfc_vport *vport, int state, int count) vport->fc_npr_cnt += count; break; } - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, iflags); } static void @@ -4480,9 +4488,21 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return NULL; if (phba->sli_rev == LPFC_SLI_REV4) { - rpi = lpfc_sli4_alloc_rpi(vport->phba); - if (rpi == LPFC_RPI_ALLOC_ERROR) + if (ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR) + rpi = lpfc_sli4_alloc_rpi(vport->phba); + else + rpi = ndlp->nlp_rpi; + + if (rpi == LPFC_RPI_ALLOC_ERROR) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + "0359 %s: ndlp:x%px " + "usgmap:x%x refcnt:%d FAILED RPI " + " ALLOC\n", + __func__, + (void *)ndlp, ndlp->nlp_usg_map, + kref_read(&ndlp->kref)); return NULL; + } } spin_lock_irqsave(&phba->ndlp_lock, flags); @@ -4490,9 +4510,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (NLP_CHK_FREE_REQ(ndlp)) { spin_unlock_irqrestore(&phba->ndlp_lock, flags); lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, - "0277 lpfc_enable_node: ndlp:x%p " + "0277 %s: ndlp:x%px " "usgmap:x%x refcnt:%d\n", - (void *)ndlp, ndlp->nlp_usg_map, + __func__, (void *)ndlp, ndlp->nlp_usg_map, kref_read(&ndlp->kref)); goto free_rpi; } @@ -4500,9 +4520,9 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (NLP_CHK_NODE_ACT(ndlp)) { spin_unlock_irqrestore(&phba->ndlp_lock, flags); lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, - "0278 lpfc_enable_node: ndlp:x%p " + "0278 %s: ndlp:x%px " "usgmap:x%x refcnt:%d\n", - (void *)ndlp, ndlp->nlp_usg_map, + __func__, (void *)ndlp, ndlp->nlp_usg_map, kref_read(&ndlp->kref)); goto free_rpi; } @@ -4532,7 +4552,7 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_rpi = rpi; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0008 rpi:%x DID:%x flg:%x refcnt:%d " - "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, + "map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp); @@ -4541,6 +4561,14 @@ lpfc_enable_node(struct 
lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (state != NLP_STE_UNUSED_NODE) lpfc_nlp_set_state(vport, ndlp, state); + else + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, + "0013 rpi:%x DID:%x flg:%x refcnt:%d " + "map:%x x%px STATE=UNUSED\n", + ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, + kref_read(&ndlp->kref), + ndlp->nlp_usg_map, ndlp); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_NODE, "node enable: did:x%x", @@ -4548,8 +4576,10 @@ lpfc_enable_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp; free_rpi: - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->sli_rev == LPFC_SLI_REV4) { lpfc_sli4_free_rpi(vport->phba, rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + } return NULL; } @@ -4797,7 +4827,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1434 UNREG cmpl deferred logo x%x " - "on NPort x%x Data: x%x %p\n", + "on NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); @@ -4805,11 +4835,54 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { + if (ndlp->nlp_flag & NLP_RELEASE_RPI) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + } ndlp->nlp_flag &= ~NLP_UNREG_INP; } } /* + * Sets the mailbox completion handler to be used for the + * unreg_rpi command. The handler varies based on the state of + * the port and what will be happening to the rpi next. + */ +static void +lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, + struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) +{ + unsigned long iflags; + + if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + mbox->ctx_ndlp = ndlp; + mbox->mbox_cmpl = lpfc_nlp_logo_unreg; + + } else if (phba->sli_rev == LPFC_SLI_REV4 && + (!(vport->load_flag & FC_UNLOADING)) && + (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= + LPFC_SLI_INTF_IF_TYPE_2) && + (kref_read(&ndlp->kref) > 0)) { + mbox->ctx_ndlp = lpfc_nlp_get(ndlp); + mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; + } else { + if (vport->load_flag & FC_UNLOADING) { + if (phba->sli_rev == LPFC_SLI_REV4) { + spin_lock_irqsave(&vport->phba->ndlp_lock, + iflags); + ndlp->nlp_flag |= NLP_RELEASE_RPI; + spin_unlock_irqrestore(&vport->phba->ndlp_lock, + iflags); + } + lpfc_nlp_get(ndlp); + } + mbox->ctx_ndlp = ndlp; + mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; + } +} + +/* * Free rpi associated with LPFC_NODELIST entry. * This routine is called from lpfc_freenode(), when we are removing * a LPFC_NODELIST entry. It is also called if the driver initiates a @@ -4829,7 +4902,8 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (ndlp->nlp_flag & NLP_RPI_REGISTERED || ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "3366 RPI x%x needs to be " "unregistered nlp_flag x%x " "did x%x\n", @@ -4840,10 +4914,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * no need to queue up another one. 
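The new lpfc_set_unreg_login_mbx_cmpl() above lifts an if/else ladder out of lpfc_unreg_rpi() (its removal appears a few hunks below): the UNREG_LOGIN mailbox wants a different completion callback depending on whether a LOGO is outstanding, whether an SLI4 port is still loaded, and whether the RPI must be released on unload. A condensed sketch of that dispatch, with hypothetical handler names standing in for the driver's three completions:

    typedef void (*ex_mbx_cmpl_t)(void *hba, void *mbox);

    static ex_mbx_cmpl_t ex_pick_unreg_cmpl(bool logo_pending,
                                            bool sli4_loaded,
                                            ex_mbx_cmpl_t logo_unreg,
                                            ex_mbx_cmpl_t unreg_cmpl_clr,
                                            ex_mbx_cmpl_t default_cmpl)
    {
            if (logo_pending)
                    return logo_unreg;      /* finish the deferred LOGO */
            if (sli4_loaded)
                    return unreg_cmpl_clr;  /* SLI4 port, not unloading */
            return default_cmpl;            /* plain teardown path */
    }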
*/ if (ndlp->nlp_flag & NLP_UNREG_INP) { - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "1436 unreg_rpi SKIP UNREG x%x on " "NPort x%x deferred x%x flg x%x " - "Data: %p\n", + "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); @@ -4859,41 +4934,22 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_unreg_login(phba, vport->vpi, rpi, mbox); mbox->vport = vport; - if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { - mbox->ctx_ndlp = ndlp; - mbox->mbox_cmpl = lpfc_nlp_logo_unreg; - } else { - if (phba->sli_rev == LPFC_SLI_REV4 && - (!(vport->load_flag & FC_UNLOADING)) && - (bf_get(lpfc_sli_intf_if_type, - &phba->sli4_hba.sli_intf) >= - LPFC_SLI_INTF_IF_TYPE_2) && - (kref_read(&ndlp->kref) > 0)) { - mbox->ctx_ndlp = lpfc_nlp_get(ndlp); - mbox->mbox_cmpl = - lpfc_sli4_unreg_rpi_cmpl_clr; - /* - * accept PLOGIs after unreg_rpi_cmpl - */ - acc_plogi = 0; - } else if (vport->load_flag & FC_UNLOADING) { - mbox->ctx_ndlp = NULL; - mbox->mbox_cmpl = - lpfc_sli_def_mbox_cmpl; - } else { - mbox->ctx_ndlp = ndlp; - mbox->mbox_cmpl = - lpfc_sli_def_mbox_cmpl; - } - } + lpfc_set_unreg_login_mbx_cmpl(phba, vport, ndlp, mbox); + if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) + /* + * accept PLOGIs after unreg_rpi_cmpl + */ + acc_plogi = 0; if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!(vport->fc_flag & FC_OFFLINE_MODE))) ndlp->nlp_flag |= NLP_UNREG_INP; - lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "1433 unreg_rpi UNREG x%x on " - "NPort x%x deferred flg x%x Data:%p\n", + "NPort x%x deferred flg x%x " + "Data:x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); @@ -5025,6 +5081,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) struct lpfc_hba *phba = vport->phba; LPFC_MBOXQ_t *mb, *nextmb; struct lpfc_dmabuf *mp; + unsigned long iflags; /* Cleanup node for NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -5034,16 +5091,16 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ndlp->nlp_state, ndlp->nlp_rpi); if (NLP_CHK_FREE_REQ(ndlp)) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, - "0280 lpfc_cleanup_node: ndlp:x%p " + "0280 %s: ndlp:x%px " "usgmap:x%x refcnt:%d\n", - (void *)ndlp, ndlp->nlp_usg_map, + __func__, (void *)ndlp, ndlp->nlp_usg_map, kref_read(&ndlp->kref)); lpfc_dequeue_node(vport, ndlp); } else { lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, - "0281 lpfc_cleanup_node: ndlp:x%p " + "0281 %s: ndlp:x%px " "usgmap:x%x refcnt:%d\n", - (void *)ndlp, ndlp->nlp_usg_map, + __func__, (void *)ndlp, ndlp->nlp_usg_map, kref_read(&ndlp->kref)); lpfc_disable_node(vport, ndlp); } @@ -5104,8 +5161,22 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->els_retry_evt.evt_listp); list_del_init(&ndlp->dev_loss_evt.evt_listp); lpfc_cleanup_vports_rrqs(vport, ndlp); - lpfc_unreg_rpi(vport, ndlp); - + if (phba->sli_rev == LPFC_SLI_REV4) + ndlp->nlp_flag |= NLP_RELEASE_RPI; + if (!lpfc_unreg_rpi(vport, ndlp)) { + /* Clean up unregistered and non freed rpis */ + if ((ndlp->nlp_flag & NLP_RELEASE_RPI) && + !(ndlp->nlp_rpi == LPFC_RPI_ALLOC_ERROR)) { + lpfc_sli4_free_rpi(vport->phba, + ndlp->nlp_rpi); + spin_lock_irqsave(&vport->phba->ndlp_lock, + iflags); + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + spin_unlock_irqrestore(&vport->phba->ndlp_lock, + 
iflags); + } + } return 0; } @@ -5131,8 +5202,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* For this case we need to cleanup the default rpi * allocated by the firmware. */ - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "0005 rpi:%x DID:%x flg:%x %d map:%x %p\n", + lpfc_printf_vlog(vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0005 Cleanup Default rpi:x%x DID:x%x flg:x%x " + "ref %d map:x%x ndlp x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp); @@ -5168,9 +5241,10 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * for registered rport so need to cleanup rport */ lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, - "0940 removed node x%p DID x%x " - " rport not null %p\n", - ndlp, ndlp->nlp_DID, ndlp->rport); + "0940 removed node x%px DID x%x " + "rpi %d rport not null x%px\n", + ndlp, ndlp->nlp_DID, ndlp->nlp_rpi, + ndlp->rport); rport = ndlp->rport; rdata = rport->dd_data; rdata->pnode = NULL; @@ -5243,15 +5317,15 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (lpfc_matchdid(vport, ndlp, did)) { - data1 = (((uint32_t) ndlp->nlp_state << 24) | - ((uint32_t) ndlp->nlp_xri << 16) | - ((uint32_t) ndlp->nlp_type << 8) | - ((uint32_t) ndlp->nlp_rpi & 0xff)); + data1 = (((uint32_t)ndlp->nlp_state << 24) | + ((uint32_t)ndlp->nlp_xri << 16) | + ((uint32_t)ndlp->nlp_type << 8) | + ((uint32_t)ndlp->nlp_usg_map & 0xff)); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0929 FIND node DID " - "Data: x%p x%x x%x x%x %p\n", + "Data: x%px x%x x%x x%x x%x x%px\n", ndlp, ndlp->nlp_DID, - ndlp->nlp_flag, data1, + ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); return ndlp; } @@ -5296,7 +5370,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport) spin_unlock_irqrestore(shost->host_lock, iflags); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "2025 FIND node DID " - "Data: x%p x%x x%x x%x %p\n", + "Data: x%px x%x x%x x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->active_rrqs_xri_bitmap); @@ -5328,6 +5402,13 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if (!ndlp) return NULL; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6453 Setup New Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -5336,8 +5417,17 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if (vport->phba->nvmet_support) return NULL; ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_NPR_NODE); - if (!ndlp) + if (!ndlp) { + lpfc_printf_vlog(vport, KERN_WARNING, LOG_SLI, + "0014 Could not enable ndlp\n"); return NULL; + } + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6454 Setup Enabled Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); @@ -5357,6 +5447,12 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) */ lpfc_cancel_retry_delay_tmo(vport, ndlp); + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6455 Setup RSCN Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + /* NVME Target mode waits until rport is known to be * impacted by 
the RSCN before it transitions. No * active management - just go to NPR provided the @@ -5368,15 +5464,32 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) /* If we've already received a PLOGI from this NPort * we don't need to try to discover it again. */ - if (ndlp->nlp_flag & NLP_RCV_PLOGI) + if (ndlp->nlp_flag & NLP_RCV_PLOGI && + !(ndlp->nlp_type & + (NLP_FCP_TARGET | NLP_NVME_TARGET))) return NULL; + ndlp->nlp_prev_state = ndlp->nlp_state; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_2B_DISC; spin_unlock_irq(shost->host_lock); - } else + } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6456 Skip Setup RSCN Node x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); ndlp = NULL; + } } else { + lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, + "6457 Setup Active Node 2B_DISC x%x " + "Data:x%x x%x x%x\n", + ndlp->nlp_DID, ndlp->nlp_flag, + ndlp->nlp_state, vport->fc_flag); + /* If the initiator received a PLOGI from this NPort or if the * initiator is already in the process of discovery on it, * there's no need to try to discover it again. @@ -5528,10 +5641,10 @@ lpfc_disc_start(struct lpfc_vport *vport) /* Start Discovery state <hba_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, - "0202 Start Discovery hba state x%x " - "Data: x%x x%x x%x\n", + "0202 Start Discovery port state x%x " + "flg x%x Data: x%x x%x x%x\n", vport->port_state, vport->fc_flag, vport->fc_plogi_cnt, - vport->fc_adisc_cnt); + vport->fc_adisc_cnt, vport->fc_npr_cnt); /* First do ADISCs - if any */ num_sent = lpfc_els_disc_adisc(vport); @@ -5959,8 +6072,8 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_flag |= NLP_RPI_REGISTERED; ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); - lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, - "0004 rpi:%x DID:%x flg:%x %d map:%x %p\n", + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, + "0004 rpi:%x DID:%x flg:%x %d map:%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp->nlp_usg_map, ndlp); @@ -6014,8 +6127,8 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (filter(ndlp, param)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "3185 FIND node filter %p DID " - "ndlp %p did x%x flg x%x st x%x " + "3185 FIND node filter %ps DID " + "ndlp x%px did x%x flg x%x st x%x " "xri x%x type x%x rpi x%x\n", filter, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6025,7 +6138,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) } } lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, - "3186 FIND node filter %p NOT FOUND.\n", filter); + "3186 FIND node filter %ps NOT FOUND.\n", filter); return NULL; } @@ -6065,10 +6178,11 @@ lpfc_findnode_rpi(struct lpfc_vport *vport, uint16_t rpi) { struct Scsi_Host *shost = lpfc_shost_from_vport(vport); struct lpfc_nodelist *ndlp; + unsigned long flags; - spin_lock_irq(shost->host_lock); + spin_lock_irqsave(shost->host_lock, flags); ndlp = __lpfc_findnode_rpi(vport, rpi); - spin_unlock_irq(shost->host_lock); + spin_unlock_irqrestore(shost->host_lock, flags); return ndlp; } @@ -6147,12 +6261,12 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = rpi; - lpfc_printf_vlog(vport, 
KERN_INFO, LOG_NODE, - "0007 rpi:%x DID:%x flg:%x refcnt:%d " - "map:%x %p\n", ndlp->nlp_rpi, ndlp->nlp_DID, - ndlp->nlp_flag, - kref_read(&ndlp->kref), - ndlp->nlp_usg_map, ndlp); + lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, + "0007 Init New ndlp x%px, rpi:x%x DID:%x " + "flg:x%x refcnt:%d map:x%x\n", + ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, + ndlp->nlp_flag, kref_read(&ndlp->kref), + ndlp->nlp_usg_map); ndlp->active_rrqs_xri_bitmap = mempool_alloc(vport->phba->active_rrq_pool, @@ -6187,8 +6301,9 @@ lpfc_nlp_release(struct kref *kref) ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0279 lpfc_nlp_release: ndlp:x%p did %x " + "0279 %s: ndlp:x%px did %x " "usgmap:x%x refcnt:%d rpi:%x\n", + __func__, (void *)ndlp, ndlp->nlp_DID, ndlp->nlp_usg_map, kref_read(&ndlp->kref), ndlp->nlp_rpi); @@ -6200,8 +6315,6 @@ lpfc_nlp_release(struct kref *kref) spin_lock_irqsave(&phba->ndlp_lock, flags); NLP_CLR_NODE_ACT(ndlp); spin_unlock_irqrestore(&phba->ndlp_lock, flags); - if (phba->sli_rev == LPFC_SLI_REV4) - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); /* free ndlp memory for final ndlp release */ if (NLP_CHK_FREE_REQ(ndlp)) { @@ -6237,9 +6350,9 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp) if (!NLP_CHK_NODE_ACT(ndlp) || NLP_CHK_FREE_ACK(ndlp)) { spin_unlock_irqrestore(&phba->ndlp_lock, flags); lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, - "0276 lpfc_nlp_get: ndlp:x%p " + "0276 %s: ndlp:x%px " "usgmap:x%x refcnt:%d\n", - (void *)ndlp, ndlp->nlp_usg_map, + __func__, (void *)ndlp, ndlp->nlp_usg_map, kref_read(&ndlp->kref)); return NULL; } else @@ -6265,9 +6378,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) return 1; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node put: did:x%x flg:x%x refcnt:x%x", - ndlp->nlp_DID, ndlp->nlp_flag, - kref_read(&ndlp->kref)); + "node put: did:x%x flg:x%x refcnt:x%x", + ndlp->nlp_DID, ndlp->nlp_flag, + kref_read(&ndlp->kref)); phba = ndlp->phba; spin_lock_irqsave(&phba->ndlp_lock, flags); /* Check the ndlp memory free acknowledge flag to avoid the @@ -6277,9 +6390,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) if (NLP_CHK_FREE_ACK(ndlp)) { spin_unlock_irqrestore(&phba->ndlp_lock, flags); lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, - "0274 lpfc_nlp_put: ndlp:x%p " + "0274 %s: ndlp:x%px " "usgmap:x%x refcnt:%d\n", - (void *)ndlp, ndlp->nlp_usg_map, + __func__, (void *)ndlp, ndlp->nlp_usg_map, kref_read(&ndlp->kref)); return 1; } @@ -6290,9 +6403,9 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) if (NLP_CHK_IACT_REQ(ndlp)) { spin_unlock_irqrestore(&phba->ndlp_lock, flags); lpfc_printf_vlog(ndlp->vport, KERN_WARNING, LOG_NODE, - "0275 lpfc_nlp_put: ndlp:x%p " + "0275 %s: ndlp:x%px " "usgmap:x%x refcnt:%d\n", - (void *)ndlp, ndlp->nlp_usg_map, + __func__, (void *)ndlp, ndlp->nlp_usg_map, kref_read(&ndlp->kref)); return 1; } @@ -6382,7 +6495,8 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) goto out; } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { ret = 1; - lpfc_printf_log(phba, KERN_INFO, LOG_ELS, + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, "2624 RPI %x DID %x flag %x " "still logged in\n", ndlp->nlp_rpi, ndlp->nlp_DID, diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h index 5b439a6dcde1..436cdc8c5ef4 100644 --- a/drivers/scsi/lpfc/lpfc_hw.h +++ b/drivers/scsi/lpfc/lpfc_hw.h @@ -843,7 +843,7 @@ typedef struct _ADISC { /* Structure is in Big Endian format */ struct lpfc_name portName; struct lpfc_name nodeName; uint32_t DID; -} ADISC; +} __packed ADISC; 
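The __packed added to ADISC here (and to FAN and RNID just below) matters because these structs overlay big-endian ELS payloads lifted out of receive buffers: packing removes any compiler-inserted padding and drops the natural-alignment assumption, so members are read at their on-the-wire offsets wherever the payload happens to sit. An illustrative layout, not one of the driver's:

    #include <linux/build_bug.h>
    #include <linux/types.h>

    struct ex_wire_payload {
            u8     code;
            __be32 did;     /* offset 1 when packed, 4 when padded */
            __be32 count;
    } __packed;

    static_assert(sizeof(struct ex_wire_payload) == 9);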
typedef struct _FARP { /* Structure is in Big Endian format */ uint32_t Mflags:8; @@ -873,7 +873,7 @@ typedef struct _FAN { /* Structure is in Big Endian format */ uint32_t Fdid; struct lpfc_name FportName; struct lpfc_name FnodeName; -} FAN; +} __packed FAN; typedef struct _SCR { /* Structure is in Big Endian format */ uint8_t resvd1; @@ -917,7 +917,7 @@ typedef struct _RNID { /* Structure is in Big Endian format */ union { RNID_TOP_DISC topologyDisc; /* topology disc (0xdf) */ } un; -} RNID; +} __packed RNID; typedef struct _RPS { /* Structure is in Big Endian format */ union { diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 77f9a55a3f54..9a064b96e570 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -210,7 +210,6 @@ struct lpfc_sli_intf { #define LPFC_MAX_IMAX 5000000 #define LPFC_DEF_IMAX 0 -#define LPFC_IMAX_THRESHOLD 1000 #define LPFC_MAX_AUTO_EQ_DELAY 120 #define LPFC_EQ_DELAY_STEP 15 #define LPFC_EQD_ISR_TRIGGER 20000 @@ -2050,6 +2049,23 @@ struct sli4_sge { /* SLI-4 */ uint32_t sge_len; }; +struct sli4_hybrid_sgl { + struct list_head list_node; + struct sli4_sge *dma_sgl; + dma_addr_t dma_phys_sgl; +}; + +struct fcp_cmd_rsp_buf { + struct list_head list_node; + + /* for storing cmd/rsp dma alloc'ed virt_addr */ + struct fcp_cmnd *fcp_cmnd; + struct fcp_rsp *fcp_rsp; + + /* for storing this cmd/rsp's dma mapped phys addr from per CPU pool */ + dma_addr_t fcp_cmd_rsp_dma_handle; +}; + struct sli4_sge_diseed { /* SLI-4 */ uint32_t ref_tag; uint32_t ref_tag_tran; @@ -2303,6 +2319,7 @@ struct lpfc_mbx_redisc_fcf_tbl { #define ADD_STATUS_OPERATION_ALREADY_ACTIVE 0x67 #define ADD_STATUS_FW_NOT_SUPPORTED 0xEB #define ADD_STATUS_INVALID_REQUEST 0x4B +#define ADD_STATUS_FW_DOWNLOAD_HW_DISABLED 0x58 struct lpfc_mbx_sli4_config { struct mbox_header header; @@ -2792,6 +2809,15 @@ struct lpfc_mbx_read_config { #define lpfc_mbx_rd_conf_trunk_SHIFT 12 #define lpfc_mbx_rd_conf_trunk_MASK 0x0000000F #define lpfc_mbx_rd_conf_trunk_WORD word2 +#define lpfc_mbx_rd_conf_pt_SHIFT 20 +#define lpfc_mbx_rd_conf_pt_MASK 0x00000003 +#define lpfc_mbx_rd_conf_pt_WORD word2 +#define lpfc_mbx_rd_conf_tf_SHIFT 22 +#define lpfc_mbx_rd_conf_tf_MASK 0x00000001 +#define lpfc_mbx_rd_conf_tf_WORD word2 +#define lpfc_mbx_rd_conf_ptv_SHIFT 23 +#define lpfc_mbx_rd_conf_ptv_MASK 0x00000001 +#define lpfc_mbx_rd_conf_ptv_WORD word2 #define lpfc_mbx_rd_conf_topology_SHIFT 24 #define lpfc_mbx_rd_conf_topology_MASK 0x000000FF #define lpfc_mbx_rd_conf_topology_WORD word2 @@ -3449,6 +3475,9 @@ struct lpfc_sli4_parameters { #define cfg_xib_SHIFT 4 #define cfg_xib_MASK 0x00000001 #define cfg_xib_WORD word19 +#define cfg_xpsgl_SHIFT 6 +#define cfg_xpsgl_MASK 0x00000001 +#define cfg_xpsgl_WORD word19 #define cfg_eqdr_SHIFT 8 #define cfg_eqdr_MASK 0x00000001 #define cfg_eqdr_WORD word19 @@ -3459,6 +3488,13 @@ struct lpfc_sli4_parameters { #define cfg_bv1s_SHIFT 10 #define cfg_bv1s_MASK 0x00000001 #define cfg_bv1s_WORD word19 +#define cfg_pvl_SHIFT 13 +#define cfg_pvl_MASK 0x00000001 +#define cfg_pvl_WORD word19 + +#define cfg_nsler_SHIFT 12 +#define cfg_nsler_MASK 0x00000001 +#define cfg_nsler_WORD word19 uint32_t word20; #define cfg_max_tow_xri_SHIFT 0 @@ -3494,6 +3530,7 @@ struct lpfc_sli4_parameters { #define LPFC_SET_UE_RECOVERY 0x10 #define LPFC_SET_MDS_DIAGS 0x11 +#define LPFC_SET_DUAL_DUMP 0x1e struct lpfc_mbx_set_feature { struct mbox_header header; uint32_t feature; @@ -3508,6 +3545,15 @@ struct lpfc_mbx_set_feature { #define 
lpfc_mbx_set_feature_mds_deep_loopbk_SHIFT 1 #define lpfc_mbx_set_feature_mds_deep_loopbk_MASK 0x00000001 #define lpfc_mbx_set_feature_mds_deep_loopbk_WORD word6 +#define lpfc_mbx_set_feature_dd_SHIFT 0 +#define lpfc_mbx_set_feature_dd_MASK 0x00000001 +#define lpfc_mbx_set_feature_dd_WORD word6 +#define lpfc_mbx_set_feature_ddquery_SHIFT 1 +#define lpfc_mbx_set_feature_ddquery_MASK 0x00000001 +#define lpfc_mbx_set_feature_ddquery_WORD word6 +#define LPFC_DISABLE_DUAL_DUMP 0 +#define LPFC_ENABLE_DUAL_DUMP 1 +#define LPFC_QUERY_OP_DUAL_DUMP 2 uint32_t word7; #define lpfc_mbx_set_feature_UERP_SHIFT 0 #define lpfc_mbx_set_feature_UERP_MASK 0x0000ffff @@ -3879,6 +3925,9 @@ struct lpfc_mbx_wr_object { #define LPFC_CHANGE_STATUS_FW_RESET 0x02 #define LPFC_CHANGE_STATUS_PORT_MIGRATION 0x04 #define LPFC_CHANGE_STATUS_PCI_RESET 0x05 +#define lpfc_wr_object_csf_SHIFT 8 +#define lpfc_wr_object_csf_MASK 0x00000001 +#define lpfc_wr_object_csf_WORD word5 } response; } u; }; @@ -4237,6 +4286,8 @@ struct lpfc_acqe_sli { #define LPFC_SLI_EVENT_TYPE_DIAG_DUMP 0x5 #define LPFC_SLI_EVENT_TYPE_MISCONFIGURED 0x9 #define LPFC_SLI_EVENT_TYPE_REMOTE_DPORT 0xA +#define LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN 0xF +#define LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE 0x10 }; /* @@ -4314,6 +4365,12 @@ struct wqe_common { #define wqe_rcvoxid_SHIFT 16 #define wqe_rcvoxid_MASK 0x0000FFFF #define wqe_rcvoxid_WORD word9 +#define wqe_sof_SHIFT 24 +#define wqe_sof_MASK 0x000000FF +#define wqe_sof_WORD word9 +#define wqe_eof_SHIFT 16 +#define wqe_eof_MASK 0x000000FF +#define wqe_eof_WORD word9 uint32_t word10; #define wqe_ebde_cnt_SHIFT 0 #define wqe_ebde_cnt_MASK 0x0000000f @@ -4595,6 +4652,7 @@ struct lpfc_nvme_prli { #define prli_type_code_WORD word1 uint32_t word_rsvd2; uint32_t word_rsvd3; + uint32_t word4; #define prli_fba_SHIFT 0 #define prli_fba_MASK 0x00000001 @@ -4611,6 +4669,9 @@ struct lpfc_nvme_prli { #define prli_conf_SHIFT 7 #define prli_conf_MASK 0x00000001 #define prli_conf_WORD word4 +#define prli_nsler_SHIFT 8 +#define prli_nsler_MASK 0x00000001 +#define prli_nsler_WORD word4 uint32_t word5; #define prli_fb_sz_SHIFT 0 #define prli_fb_sz_MASK 0x0000ffff @@ -4625,6 +4686,7 @@ struct create_xri_wqe { uint32_t rsvd_12_15[4]; /* word 12-15 */ }; +#define INHIBIT_ABORT 1 #define T_REQUEST_TAG 3 #define T_XRI_TAG 1 @@ -4773,8 +4835,8 @@ union lpfc_wqe128 { struct send_frame_wqe send_frame; }; -#define MAGIC_NUMER_G6 0xFEAA0003 -#define MAGIC_NUMER_G7 0xFEAA0005 +#define MAGIC_NUMBER_G6 0xFEAA0003 +#define MAGIC_NUMBER_G7 0xFEAA0005 struct lpfc_grp_hdr { uint32_t size; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a7549ae32542..5a605773dd0a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -39,6 +39,9 @@ #include <linux/msi.h> #include <linux/irq.h> #include <linux/bitops.h> +#include <linux/crash_dump.h> +#include <linux/cpu.h> +#include <linux/cpuhotplug.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> @@ -65,15 +68,13 @@ #include "lpfc_version.h" #include "lpfc_ids.h" -char *_dump_buf_data; -unsigned long _dump_buf_data_order; -char *_dump_buf_dif; -unsigned long _dump_buf_dif_order; -spinlock_t _dump_buf_lock; - +static enum cpuhp_state lpfc_cpuhp_state; /* Used when mapping IRQ vectors in a driver centric manner */ static uint32_t lpfc_present_cpu; +static void __lpfc_cpuhp_remove(struct lpfc_hba *phba); +static void lpfc_cpuhp_remove(struct lpfc_hba *phba); +static void lpfc_cpuhp_add(struct lpfc_hba *phba); static void lpfc_get_hba_model_desc(struct 
lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); static int lpfc_sli4_queue_verify(struct lpfc_hba *); @@ -1081,8 +1082,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; - spin_lock(&qp->abts_scsi_buf_list_lock); - list_splice_init(&qp->lpfc_abts_scsi_buf_list, + spin_lock(&qp->abts_io_buf_list_lock); + list_splice_init(&qp->lpfc_abts_io_buf_list, &aborts); list_for_each_entry_safe(psb, psb_next, &aborts, list) { @@ -1093,29 +1094,11 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba) spin_lock(&qp->io_buf_list_put_lock); list_splice_init(&aborts, &qp->lpfc_io_buf_list_put); qp->put_io_bufs += qp->abts_scsi_io_bufs; + qp->put_io_bufs += qp->abts_nvme_io_bufs; qp->abts_scsi_io_bufs = 0; + qp->abts_nvme_io_bufs = 0; spin_unlock(&qp->io_buf_list_put_lock); - spin_unlock(&qp->abts_scsi_buf_list_lock); - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - spin_lock(&qp->abts_nvme_buf_list_lock); - list_splice_init(&qp->lpfc_abts_nvme_buf_list, - &nvme_aborts); - list_for_each_entry_safe(psb, psb_next, &nvme_aborts, - list) { - psb->pCmd = NULL; - psb->status = IOSTAT_SUCCESS; - cnt++; - } - spin_lock(&qp->io_buf_list_put_lock); - qp->put_io_bufs += qp->abts_nvme_io_bufs; - qp->abts_nvme_io_bufs = 0; - list_splice_init(&nvme_aborts, - &qp->lpfc_io_buf_list_put); - spin_unlock(&qp->io_buf_list_put_lock); - spin_unlock(&qp->abts_nvme_buf_list_lock); - - } + spin_unlock(&qp->abts_io_buf_list_lock); } spin_unlock_irq(&phba->hbalock); @@ -1258,7 +1241,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work) struct lpfc_hba, eq_delay_work); struct lpfc_eq_intr_info *eqi, *eqi_new; struct lpfc_queue *eq, *eq_next; - unsigned char *eqcnt = NULL; + unsigned char *ena_delay = NULL; uint32_t usdelay; int i; @@ -1269,35 +1252,36 @@ lpfc_hb_eq_delay_work(struct work_struct *work) phba->pport->fc_flag & FC_OFFLINE_MODE) goto requeue; - eqcnt = kcalloc(num_possible_cpus(), sizeof(unsigned char), - GFP_KERNEL); - if (!eqcnt) + ena_delay = kcalloc(phba->sli4_hba.num_possible_cpu, sizeof(*ena_delay), + GFP_KERNEL); + if (!ena_delay) goto requeue; - /* Loop thru all IRQ vectors */ for (i = 0; i < phba->cfg_irq_chann; i++) { /* Get the EQ corresponding to the IRQ vector */ eq = phba->sli4_hba.hba_eq_hdl[i].eq; - if (eq && eqcnt[eq->last_cpu] < 2) - eqcnt[eq->last_cpu]++; - continue; + if (!eq) + continue; + if (eq->q_mode || eq->q_flag & HBA_EQ_DELAY_CHK) { + eq->q_flag &= ~HBA_EQ_DELAY_CHK; + ena_delay[eq->last_cpu] = 1; + } } for_each_present_cpu(i) { - if (phba->cfg_irq_chann > 1 && eqcnt[i] < 2) - continue; - eqi = per_cpu_ptr(phba->sli4_hba.eq_info, i); - - usdelay = (eqi->icnt / LPFC_IMAX_THRESHOLD) * - LPFC_EQ_DELAY_STEP; - if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) - usdelay = LPFC_MAX_AUTO_EQ_DELAY; + if (ena_delay[i]) { + usdelay = (eqi->icnt >> 10) * LPFC_EQ_DELAY_STEP; + if (usdelay > LPFC_MAX_AUTO_EQ_DELAY) + usdelay = LPFC_MAX_AUTO_EQ_DELAY; + } else { + usdelay = 0; + } eqi->icnt = 0; list_for_each_entry_safe(eq, eq_next, &eqi->list, cpu_list) { - if (eq->last_cpu != i) { + if (unlikely(eq->last_cpu != i)) { eqi_new = per_cpu_ptr(phba->sli4_hba.eq_info, eq->last_cpu); list_move_tail(&eq->cpu_list, &eqi_new->list); @@ -1309,7 +1293,7 @@ lpfc_hb_eq_delay_work(struct work_struct *work) } } - kfree(eqcnt); + kfree(ena_delay); requeue: queue_delayed_work(phba->wq, &phba->eq_delay_work, @@ -1378,7 +1362,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba) if (vports != NULL) for (i = 0; i <= 
phba->max_vports && vports[i] != NULL; i++) { lpfc_rcv_seq_check_edtov(vports[i]); - lpfc_fdmi_num_disc_check(vports[i]); + lpfc_fdmi_change_check(vports[i]); } lpfc_destroy_vport_work_array(phba, vports); @@ -1535,6 +1519,7 @@ lpfc_sli4_offline_eratt(struct lpfc_hba *phba) spin_unlock_irq(&phba->hbalock); lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT); + lpfc_sli_flush_io_rings(phba); lpfc_offline(phba); lpfc_hba_down_post(phba); lpfc_unblock_mgmt_io(phba); @@ -1796,6 +1781,7 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action, "2887 Reset Needed: Attempting Port " "Recovery...\n"); lpfc_offline_prep(phba, mbx_action); + lpfc_sli_flush_io_rings(phba); lpfc_offline(phba); /* release interrupt for possible resource change */ lpfc_sli4_disable_intr(phba); @@ -1915,7 +1901,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "7624 Firmware not ready: Failing UE recovery," " waited %dSec", i); - lpfc_sli4_offline_eratt(phba); + phba->link_state = LPFC_HBA_ERROR; break; case LPFC_SLI_INTF_IF_TYPE_2: @@ -1989,9 +1975,8 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) } /* fall through for not able to recover */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3152 Unrecoverable error, bring the port " - "offline\n"); - lpfc_sli4_offline_eratt(phba); + "3152 Unrecoverable error\n"); + phba->link_state = LPFC_HBA_ERROR; break; case LPFC_SLI_INTF_IF_TYPE_1: default: @@ -2863,7 +2848,7 @@ lpfc_cleanup(struct lpfc_vport *vport) &vport->fc_nodes, nlp_listp) { lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_NODE, - "0282 did:x%x ndlp:x%p " + "0282 did:x%x ndlp:x%px " "usgmap:x%x refcnt:%d\n", ndlp->nlp_DID, (void *)ndlp, ndlp->nlp_usg_map, @@ -3065,11 +3050,12 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) continue; } ndlp->nlp_rpi = rpi; - lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0009 rpi:%x DID:%x " - "flg:%x map:%x %p\n", ndlp->nlp_rpi, - ndlp->nlp_DID, ndlp->nlp_flag, - ndlp->nlp_usg_map, ndlp); + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0009 Assign RPI x%x to ndlp x%px " + "DID:x%06x flg:x%x map:x%x\n", + ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, + ndlp->nlp_flag, ndlp->nlp_usg_map); } } lpfc_destroy_vport_work_array(phba, vports); @@ -3252,12 +3238,8 @@ static void lpfc_destroy_multixri_pools(struct lpfc_hba *phba) if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) lpfc_destroy_expedite_pool(phba); - if (!(phba->pport->load_flag & FC_UNLOADING)) { - lpfc_sli_flush_fcp_rings(phba); - - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) - lpfc_sli_flush_nvme_rings(phba); - } + if (!(phba->pport->load_flag & FC_UNLOADING)) + lpfc_sli_flush_io_rings(phba); hwq_count = phba->cfg_hdw_queue; @@ -3403,6 +3385,8 @@ lpfc_online(struct lpfc_hba *phba) if (phba->cfg_xri_rebalancing) lpfc_create_multixri_pools(phba); + lpfc_cpuhp_add(phba); + lpfc_unblock_mgmt_io(phba); return 0; } @@ -3469,10 +3453,15 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) list_for_each_entry_safe(ndlp, next_ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (!NLP_CHK_NODE_ACT(ndlp)) - continue; - if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) + if ((!NLP_CHK_NODE_ACT(ndlp)) || + ndlp->nlp_state == NLP_STE_UNUSED_NODE) { + /* Driver must assume RPI is invalid for + * any unused or inactive node. 
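In the lpfc_hb_eq_delay_work() rework earlier in lpfc_init.c, the coalescing delay loses the LPFC_IMAX_THRESHOLD division and becomes (icnt >> 10) * LPFC_EQ_DELAY_STEP, a shift-based stand-in for per-thousand scaling, clamped at LPFC_MAX_AUTO_EQ_DELAY. The arithmetic isolated, using the constants this diff defines (step 15, cap 120):

    #include <linux/types.h>

    static u32 ex_eq_delay_us(u64 icnt)
    {
            u32 usdelay = (icnt >> 10) * 15;  /* ~icnt / 1000, no divide */

            return usdelay > 120 ? 120 : usdelay;
    }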
+ */ + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; continue; + } + if (ndlp->nlp_type & NLP_FABRIC) { lpfc_disc_state_machine(vports[i], ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); @@ -3488,16 +3477,16 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) * comes back online. */ if (phba->sli_rev == LPFC_SLI_REV4) { - lpfc_printf_vlog(ndlp->vport, - KERN_INFO, LOG_NODE, - "0011 lpfc_offline: " - "ndlp:x%p did %x " - "usgmap:x%x rpi:%x\n", - ndlp, ndlp->nlp_DID, - ndlp->nlp_usg_map, - ndlp->nlp_rpi); - + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0011 Free RPI x%x on " + "ndlp:x%px did x%x " + "usgmap:x%x\n", + ndlp->nlp_rpi, ndlp, + ndlp->nlp_DID, + ndlp->nlp_usg_map); lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } lpfc_unreg_rpi(vports[i], ndlp); } @@ -3561,6 +3550,7 @@ lpfc_offline(struct lpfc_hba *phba) spin_unlock_irq(shost->host_lock); } lpfc_destroy_vport_work_array(phba, vports); + __lpfc_cpuhp_remove(phba); if (phba->cfg_xri_rebalancing) lpfc_destroy_multixri_pools(phba); @@ -3636,6 +3626,9 @@ lpfc_io_free(struct lpfc_hba *phba) qp->put_io_bufs--; dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); + if (phba->cfg_xpsgl && !phba->nvmet_support) + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); kfree(lpfc_ncmd); qp->total_io_bufs--; } @@ -3649,6 +3642,9 @@ lpfc_io_free(struct lpfc_hba *phba) qp->get_io_bufs--; dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data, lpfc_ncmd->dma_handle); + if (phba->cfg_xpsgl && !phba->nvmet_support) + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); kfree(lpfc_ncmd); qp->total_io_bufs--; } @@ -4097,18 +4093,9 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) LIST_HEAD(post_nblist); LIST_HEAD(nvme_nblist); - /* Sanity check to ensure our sizing is right for both SCSI and NVME */ - if (sizeof(struct lpfc_io_buf) > LPFC_COMMON_IO_BUF_SZ) { - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "6426 Common buffer size %zd exceeds %d\n", - sizeof(struct lpfc_io_buf), - LPFC_COMMON_IO_BUF_SZ); - return 0; - } - phba->sli4_hba.io_xri_cnt = 0; for (bcnt = 0; bcnt < num_to_alloc; bcnt++) { - lpfc_ncmd = kzalloc(LPFC_COMMON_IO_BUF_SZ, GFP_KERNEL); + lpfc_ncmd = kzalloc(sizeof(*lpfc_ncmd), GFP_KERNEL); if (!lpfc_ncmd) break; /* @@ -4124,22 +4111,30 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc) break; } - /* - * 4K Page alignment is CRITICAL to BlockGuard, double check - * to be sure. - */ - if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && - (((unsigned long)(lpfc_ncmd->data) & - (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { - lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "3369 Memory alignment err: addr=%lx\n", - (unsigned long)lpfc_ncmd->data); - dma_pool_free(phba->lpfc_sg_dma_buf_pool, - lpfc_ncmd->data, lpfc_ncmd->dma_handle); - kfree(lpfc_ncmd); - break; + if (phba->cfg_xpsgl && !phba->nvmet_support) { + INIT_LIST_HEAD(&lpfc_ncmd->dma_sgl_xtra_list); + } else { + /* + * 4K Page alignment is CRITICAL to BlockGuard, double + * check to be sure. 
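The lpfc_new_io_buf() change above swaps a fixed kzalloc(LPFC_COMMON_IO_BUF_SZ) for kzalloc(sizeof(*lpfc_ncmd)), which is also what lets the old compile-time size sanity check be deleted: sizing from the dereferenced pointer stays correct if struct lpfc_io_buf grows. The idiom in miniature:

    #include <linux/slab.h>

    struct ex_io_buf {
            void *data;
            unsigned int flags;
    };

    static struct ex_io_buf *ex_alloc_io_buf(void)
    {
            /* sizeof(*p) tracks the type automatically; kzalloc zeroes it */
            struct ex_io_buf *p = kzalloc(sizeof(*p), GFP_KERNEL);

            return p;       /* NULL on allocation failure */
    }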
+ */ + if ((phba->sli3_options & LPFC_SLI3_BG_ENABLED) && + (((unsigned long)(lpfc_ncmd->data) & + (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { + lpfc_printf_log(phba, KERN_ERR, LOG_FCP, + "3369 Memory alignment err: " + "addr=%lx\n", + (unsigned long)lpfc_ncmd->data); + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + lpfc_ncmd->data, + lpfc_ncmd->dma_handle); + kfree(lpfc_ncmd); + break; + } } + INIT_LIST_HEAD(&lpfc_ncmd->dma_cmd_rsp_list); + lxri = lpfc_sli4_next_xritag(phba); if (lxri == NO_XRI) { dma_pool_free(phba->lpfc_sg_dma_buf_pool, @@ -4309,14 +4304,20 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) shost->max_cmd_len = 16; if (phba->sli_rev == LPFC_SLI_REV4) { - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) - shost->nr_hw_queues = phba->cfg_hdw_queue; - else - shost->nr_hw_queues = phba->sli4_hba.num_present_cpu; + if (!phba->cfg_fcp_mq_threshold || + phba->cfg_fcp_mq_threshold > phba->cfg_hdw_queue) + phba->cfg_fcp_mq_threshold = phba->cfg_hdw_queue; + + shost->nr_hw_queues = min_t(int, 2 * num_possible_nodes(), + phba->cfg_fcp_mq_threshold); shost->dma_boundary = phba->sli4_hba.pc_sli4_params.sge_supp_len-1; - shost->sg_tablesize = phba->cfg_scsi_seg_cnt; + + if (phba->cfg_xpsgl && !phba->nvmet_support) + shost->sg_tablesize = LPFC_MAX_SG_TABLESIZE; + else + shost->sg_tablesize = phba->cfg_scsi_seg_cnt; } else /* SLI-3 has a limited number of hardware queues (3), * thus there is only one for FCP processing. @@ -5288,10 +5289,10 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) evt_type = bf_get(lpfc_trailer_type, acqe_sli); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "2901 Async SLI event - Event Data1:x%08x Event Data2:" - "x%08x SLI Event Type:%d\n", + "2901 Async SLI event - Type:%d, Event Data: x%08x " + "x%08x x%08x x%08x\n", evt_type, acqe_sli->event_data1, acqe_sli->event_data2, - evt_type); + acqe_sli->reserved, acqe_sli->trailer); port_name = phba->Port[0]; if (port_name == 0x00) @@ -5438,11 +5439,26 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli) "Event Data1:x%08x Event Data2: x%08x\n", acqe_sli->event_data1, acqe_sli->event_data2); break; + case LPFC_SLI_EVENT_TYPE_MISCONF_FAWWN: + /* Misconfigured WWN. Reports that the SLI Port is configured + * to use FA-WWN, but the attached device doesn't support it. + * No driver action is required. + * Event Data1 - N.A, Event Data2 - N.A + */ + lpfc_log_msg(phba, KERN_WARNING, LOG_SLI, + "2699 Misconfigured FA-WWN - Attached device does " + "not support FA-WWN\n"); + break; + case LPFC_SLI_EVENT_TYPE_EEPROM_FAILURE: + /* EEPROM failure.
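The lpfc_create_port() hunk above stops scaling nr_hw_queues with the present-CPU count and instead clamps it to min(2 * num_possible_nodes(), cfg_fcp_mq_threshold), with the threshold itself capped at the hardware queue count. Restated as a pure function:

    #include <linux/kernel.h>
    #include <linux/nodemask.h>

    static int ex_nr_hw_queues(int mq_threshold, int hdw_queues)
    {
            /* 0 means no limit was requested: allow every hardware queue */
            if (!mq_threshold || mq_threshold > hdw_queues)
                    mq_threshold = hdw_queues;

            return min_t(int, 2 * num_possible_nodes(), mq_threshold);
    }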
No driver action is required */ + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2518 EEPROM failure - " + "Event Data1: x%08x Event Data2: x%08x\n", + acqe_sli->event_data1, acqe_sli->event_data2); + break; default: lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "3193 Async SLI event - Event Data1:x%08x Event Data2:" - "x%08x SLI Event Type:%d\n", - acqe_sli->event_data1, acqe_sli->event_data2, + "3193 Unrecognized SLI event, type: 0x%x", evt_type); break; } @@ -5867,7 +5883,7 @@ void lpfc_sli4_async_event_proc(struct lpfc_hba *phba) break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, - "1804 Invalid asynchrous event code: " + "1804 Invalid asynchronous event code: " "x%x\n", bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)); break; @@ -5981,6 +5997,29 @@ static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode) } /** + * lpfc_cpumask_of_node_init - initalizes cpumask of phba's NUMA node + * @phba: Pointer to HBA context object. + * + **/ +static void +lpfc_cpumask_of_node_init(struct lpfc_hba *phba) +{ + unsigned int cpu, numa_node; + struct cpumask *numa_mask = &phba->sli4_hba.numa_mask; + + cpumask_clear(numa_mask); + + /* Check if we're a NUMA architecture */ + numa_node = dev_to_node(&phba->pcidev->dev); + if (numa_node == NUMA_NO_NODE) + return; + + for_each_possible_cpu(cpu) + if (cpu_to_node(cpu) == numa_node) + cpumask_set_cpu(cpu, numa_mask); +} + +/** * lpfc_enable_pci_dev - Enable a generic PCI device. * @phba: pointer to lpfc hba data structure. * @@ -6334,6 +6373,24 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ)) return -ENOMEM; + phba->lpfc_sg_dma_buf_pool = + dma_pool_create("lpfc_sg_dma_buf_pool", + &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, + BPL_ALIGN_SZ, 0); + + if (!phba->lpfc_sg_dma_buf_pool) + goto fail_free_mem; + + phba->lpfc_cmd_rsp_buf_pool = + dma_pool_create("lpfc_cmd_rsp_buf_pool", + &phba->pcidev->dev, + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp), + BPL_ALIGN_SZ, 0); + + if (!phba->lpfc_cmd_rsp_buf_pool) + goto fail_free_dma_buf_pool; + /* * Enable sr-iov virtual functions if supported and configured * through the module parameter. @@ -6352,6 +6409,13 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) } return 0; + +fail_free_dma_buf_pool: + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; +fail_free_mem: + lpfc_mem_free(phba); + return -ENOMEM; } /** @@ -6396,8 +6460,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) u32 if_fam; phba->sli4_hba.num_present_cpu = lpfc_present_cpu; - phba->sli4_hba.num_possible_cpu = num_possible_cpus(); + phba->sli4_hba.num_possible_cpu = cpumask_last(cpu_possible_mask) + 1; phba->sli4_hba.curr_disp_cpu = 0; + lpfc_cpumask_of_node_init(phba); /* Get all the module params for configuring this host */ lpfc_get_cfgparam(phba); @@ -6412,6 +6477,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) if (rc) return -ENODEV; + /* Allocate all driver workqueues here */ + + /* The lpfc_wq workqueue for deferred irq use */ + phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); + /* * Initialize timers used by driver */ @@ -6446,102 +6516,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) * The WQ create will allocate the ring. */ - /* - * 1 for cmd, 1 for rsp, NVME adds an extra one - * for boundary conditions in its max_sgl_segment template. 
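The pool creation added to lpfc_sli_driver_resource_setup() above (mirrored for SLI4 later in this file) follows the usual create/unwind discipline: allocate in order, destroy in reverse order on failure, and NULL the pointers so a shared teardown path cannot double-free. The shape, with illustrative pool names:

    #include <linux/device.h>
    #include <linux/dmapool.h>
    #include <linux/errno.h>

    static int ex_create_pools(struct device *dev, size_t sgl_sz,
                               size_t cmd_rsp_sz, size_t align,
                               struct dma_pool **sgl,
                               struct dma_pool **cmd_rsp)
    {
            *sgl = dma_pool_create("ex_sgl_pool", dev, sgl_sz, align, 0);
            if (!*sgl)
                    return -ENOMEM;

            *cmd_rsp = dma_pool_create("ex_cmd_rsp_pool", dev, cmd_rsp_sz,
                                       align, 0);
            if (!*cmd_rsp) {
                    /* unwind in reverse order of creation */
                    dma_pool_destroy(*sgl);
                    *sgl = NULL;
                    return -ENOMEM;
            }
            return 0;
    }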
- */ - extra = 2; - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) - extra++; - - /* - * It doesn't matter what family our adapter is in, we are - * limited to 2 Pages, 512 SGEs, for our SGL. - * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp - */ - max_buf_size = (2 * SLI4_PAGE_SIZE); - - /* - * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size - * used to create the sg_dma_buf_pool must be calculated. - */ - if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { - /* - * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, - * the FCP rsp, and a SGE. Sice we have no control - * over how many protection segments the SCSI Layer - * will hand us (ie: there could be one for every block - * in the IO), just allocate enough SGEs to accomidate - * our max amount and we need to limit lpfc_sg_seg_cnt - * to minimize the risk of running out. - */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + max_buf_size; - - /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ - phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; - - /* - * If supporting DIF, reduce the seg count for scsi to - * allow room for the DIF sges. - */ - if (phba->cfg_enable_bg && - phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) - phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; - else - phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; - - } else { - /* - * The scsi_buf for a regular I/O holds the FCP cmnd, - * the FCP rsp, a SGE for each, and a SGE for up to - * cfg_sg_seg_cnt data segments. - */ - phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + - sizeof(struct fcp_rsp) + - ((phba->cfg_sg_seg_cnt + extra) * - sizeof(struct sli4_sge)); - - /* Total SGEs for scsi_sg_list */ - phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; - phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; - - /* - * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only - * need to post 1 page for the SGL. - */ - } - - /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME. */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { - lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, - "6300 Reducing NVME sg segment " - "cnt to %d\n", - LPFC_MAX_NVME_SEG_CNT); - phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; - } else - phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; - } - - /* Initialize the host templates with the updated values. */ - lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; - lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; - lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; - - if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) - phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; - else - phba->cfg_sg_dma_buf_size = - SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, - "9087 sg_seg_cnt:%d dmabuf_size:%d " - "total:%d scsi:%d nvme:%d\n", - phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, - phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, - phba->cfg_nvme_seg_cnt); - /* Initialize buffer queue management fields */ INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list); phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc; @@ -6550,11 +6524,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* * Initialize the SLI Layer to run with lpfc SLI4 HBAs. 
*/ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { - /* Initialize the Abort scsi buffer list used by driver */ - spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock); - INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list); - } + /* Initialize the Abort buffer list used by driver */ + spin_lock_init(&phba->sli4_hba.abts_io_buf_list_lock); + INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_io_buf_list); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { /* Initialize the Abort nvme buffer list used by driver */ @@ -6762,6 +6734,131 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) } } + /* + * 1 for cmd, 1 for rsp, NVME adds an extra one + * for boundary conditions in its max_sgl_segment template. + */ + extra = 2; + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + extra++; + + /* + * It doesn't matter what family our adapter is in, we are + * limited to 2 Pages, 512 SGEs, for our SGL. + * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp + */ + max_buf_size = (2 * SLI4_PAGE_SIZE); + + /* + * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size + * used to create the sg_dma_buf_pool must be calculated. + */ + if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) { + /* Both cfg_enable_bg and cfg_external_dif code paths */ + + /* + * The scsi_buf for a T10-DIF I/O holds the FCP cmnd, + * the FCP rsp, and a SGE. Since we have no control + * over how many protection segments the SCSI Layer + * will hand us (ie: there could be one for every block + * in the IO), just allocate enough SGEs to accommodate + * our max amount and we need to limit lpfc_sg_seg_cnt + * to minimize the risk of running out. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + max_buf_size; + + /* Total SGEs for scsi_sg_list and scsi_sg_prot_list */ + phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT; + + /* + * If supporting DIF, reduce the seg count for scsi to + * allow room for the DIF sges. + */ + if (phba->cfg_enable_bg && + phba->cfg_sg_seg_cnt > LPFC_MAX_BG_SLI4_SEG_CNT_DIF) + phba->cfg_scsi_seg_cnt = LPFC_MAX_BG_SLI4_SEG_CNT_DIF; + else + phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; + + } else { + /* + * The scsi_buf for a regular I/O holds the FCP cmnd, + * the FCP rsp, a SGE for each, and a SGE for up to + * cfg_sg_seg_cnt data segments. + */ + phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp) + + ((phba->cfg_sg_seg_cnt + extra) * + sizeof(struct sli4_sge)); + + /* Total SGEs for scsi_sg_list */ + phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra; + phba->cfg_scsi_seg_cnt = phba->cfg_sg_seg_cnt; + + /* + * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only + * need to post 1 page for the SGL. + */ + } + + if (phba->cfg_xpsgl && !phba->nvmet_support) + phba->cfg_sg_dma_buf_size = LPFC_DEFAULT_XPSGL_SIZE; + else if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ) + phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ; + else + phba->cfg_sg_dma_buf_size = + SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size); + + phba->border_sge_num = phba->cfg_sg_dma_buf_size / + sizeof(struct sli4_sge); + + /* Limit to LPFC_MAX_NVME_SEG_CNT for NVME.
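The sizing computed above feeds the dma_pool_create() calls that follow in this hunk (and the SLI-3 path earlier in this patch). As a reminder of the pool lifecycle those hunks rely on, here is a hedged, self-contained sketch; the pool name, buffer size, and alignment are placeholders, not the driver's values.

#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/pci.h>

/* One pool per fixed-size class of DMA buffers. */
static int example_pool_cycle(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *buf;

	pool = dma_pool_create("example_pool", &pdev->dev,
			       4096 /* buffer size */, 64 /* alignment */, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program the hardware with the bus address in @dma ... */

	dma_pool_free(pool, buf, dma);	/* every buffer back first */
	dma_pool_destroy(pool);		/* then the pool itself */
	return 0;
}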
*/ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + if (phba->cfg_sg_seg_cnt > LPFC_MAX_NVME_SEG_CNT) { + lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, + "6300 Reducing NVME sg segment " + "cnt to %d\n", + LPFC_MAX_NVME_SEG_CNT); + phba->cfg_nvme_seg_cnt = LPFC_MAX_NVME_SEG_CNT; + } else + phba->cfg_nvme_seg_cnt = phba->cfg_sg_seg_cnt; + } + + /* Initialize the host templates with the updated values. */ + lpfc_vport_template.sg_tablesize = phba->cfg_scsi_seg_cnt; + lpfc_template.sg_tablesize = phba->cfg_scsi_seg_cnt; + lpfc_template_no_hr.sg_tablesize = phba->cfg_scsi_seg_cnt; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP, + "9087 sg_seg_cnt:%d dmabuf_size:%d " + "total:%d scsi:%d nvme:%d\n", + phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size, + phba->cfg_total_seg_cnt, phba->cfg_scsi_seg_cnt, + phba->cfg_nvme_seg_cnt); + + if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) + i = phba->cfg_sg_dma_buf_size; + else + i = SLI4_PAGE_SIZE; + + phba->lpfc_sg_dma_buf_pool = + dma_pool_create("lpfc_sg_dma_buf_pool", + &phba->pcidev->dev, + phba->cfg_sg_dma_buf_size, + i, 0); + if (!phba->lpfc_sg_dma_buf_pool) + goto out_free_bsmbx; + + phba->lpfc_cmd_rsp_buf_pool = + dma_pool_create("lpfc_cmd_rsp_buf_pool", + &phba->pcidev->dev, + sizeof(struct fcp_cmnd) + + sizeof(struct fcp_rsp), + i, 0); + if (!phba->lpfc_cmd_rsp_buf_pool) + goto out_free_sg_dma_buf; + mempool_free(mboxq, phba->mbox_mem_pool); /* Verify OAS is supported */ @@ -6773,12 +6870,12 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) /* Verify all the SLI4 queues */ rc = lpfc_sli4_queue_verify(phba); if (rc) - goto out_free_bsmbx; + goto out_free_cmd_rsp_buf; /* Create driver internal CQE event pool */ rc = lpfc_sli4_cq_event_pool_create(phba); if (rc) - goto out_free_bsmbx; + goto out_free_cmd_rsp_buf; /* Initialize sgl lists per host */ lpfc_init_sgl_list(phba); @@ -6869,6 +6966,12 @@ out_free_active_sgl: lpfc_free_active_sgl(phba); out_destroy_cq_event_pool: lpfc_sli4_cq_event_pool_destroy(phba); +out_free_cmd_rsp_buf: + dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); + phba->lpfc_cmd_rsp_buf_pool = NULL; +out_free_sg_dma_buf: + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; out_free_bsmbx: lpfc_destroy_bootstrap_mbox(phba); out_free_mem: @@ -6895,6 +6998,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) phba->sli4_hba.num_possible_cpu = 0; phba->sli4_hba.num_present_cpu = 0; phba->sli4_hba.curr_disp_cpu = 0; + cpumask_clear(&phba->sli4_hba.numa_mask); /* Free memory allocated for fast-path work queue handles */ kfree(phba->sli4_hba.hba_eq_hdl); @@ -6995,12 +7099,6 @@ lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba) return error; } - /* The lpfc_wq workqueue for deferred irq use, is only used for SLI4 */ - if (phba->sli_rev == LPFC_SLI_REV4) - phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0); - else - phba->wq = NULL; - return 0; } @@ -7074,7 +7172,7 @@ lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count) if (iocbq_entry == NULL) { printk(KERN_ERR "%s: only allocated %d iocbs of " "expected %d count. 
Unloading driver.\n", - __func__, i, LPFC_IOCB_LIST_CNT); + __func__, i, iocb_count); goto out_free_iocbq; } @@ -7493,18 +7591,10 @@ lpfc_create_shost(struct lpfc_hba *phba) if (phba->nvmet_support) { /* Only 1 vport (pport) will support NVME target */ - if (phba->txrdy_payload_pool == NULL) { - phba->txrdy_payload_pool = dma_pool_create( - "txrdy_pool", &phba->pcidev->dev, - TXRDY_PAYLOAD_LEN, 16, 0); - if (phba->txrdy_payload_pool) { - phba->targetport = NULL; - phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; - lpfc_printf_log(phba, KERN_INFO, - LOG_INIT | LOG_NVME_DISC, - "6076 NVME Target Found\n"); - } - } + phba->targetport = NULL; + phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME_DISC, + "6076 NVME Target Found\n"); } lpfc_debugfs_initialize(vport); @@ -7561,7 +7651,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) uint32_t old_mask; uint32_t old_guard; - int pagecnt = 10; if (phba->cfg_prot_mask && phba->cfg_prot_guard) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "1478 Registering BlockGuard with the " @@ -7598,56 +7687,6 @@ lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost) "layer, Bad protection parameters: %d %d\n", old_mask, old_guard); } - - if (!_dump_buf_data) { - while (pagecnt) { - spin_lock_init(&_dump_buf_lock); - _dump_buf_data = - (char *) __get_free_pages(GFP_KERNEL, pagecnt); - if (_dump_buf_data) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9043 BLKGRD: allocated %d pages for " - "_dump_buf_data at 0x%p\n", - (1 << pagecnt), _dump_buf_data); - _dump_buf_data_order = pagecnt; - memset(_dump_buf_data, 0, - ((1 << PAGE_SHIFT) << pagecnt)); - break; - } else - --pagecnt; - } - if (!_dump_buf_data_order) - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9044 BLKGRD: ERROR unable to allocate " - "memory for hexdump\n"); - } else - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9045 BLKGRD: already allocated _dump_buf_data=0x%p" - "\n", _dump_buf_data); - if (!_dump_buf_dif) { - while (pagecnt) { - _dump_buf_dif = - (char *) __get_free_pages(GFP_KERNEL, pagecnt); - if (_dump_buf_dif) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9046 BLKGRD: allocated %d pages for " - "_dump_buf_dif at 0x%p\n", - (1 << pagecnt), _dump_buf_dif); - _dump_buf_dif_order = pagecnt; - memset(_dump_buf_dif, 0, - ((1 << PAGE_SHIFT) << pagecnt)); - break; - } else - --pagecnt; - } - if (!_dump_buf_dif_order) - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9047 BLKGRD: ERROR unable to allocate " - "memory for hexdump\n"); - } else - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n", - _dump_buf_dif); } /** @@ -8234,6 +8273,86 @@ lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba) memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx)); } +static const char * const lpfc_topo_to_str[] = { + "Loop then P2P", + "Loopback", + "P2P Only", + "Unsupported", + "Loop Only", + "Unsupported", + "P2P then Loop", +}; + +/** + * lpfc_map_topology - Map the topology read from READ_CONFIG + * @phba: pointer to lpfc hba data structure. + * @rdconf: pointer to read config data + * + * This routine is invoked to map the topology values as read + * from the read config mailbox command. 
If the persistent + * topology feature is supported, the firmware will provide the + * saved topology information to be used in INIT_LINK + * + **/ +#define LINK_FLAGS_DEF 0x0 +#define LINK_FLAGS_P2P 0x1 +#define LINK_FLAGS_LOOP 0x2 +static void +lpfc_map_topology(struct lpfc_hba *phba, struct lpfc_mbx_read_config *rd_config) +{ + u8 ptv, tf, pt; + + ptv = bf_get(lpfc_mbx_rd_conf_ptv, rd_config); + tf = bf_get(lpfc_mbx_rd_conf_tf, rd_config); + pt = bf_get(lpfc_mbx_rd_conf_pt, rd_config); + + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2027 Read Config Data : ptv:0x%x, tf:0x%x pt:0x%x", + ptv, tf, pt); + if (!ptv) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2019 FW does not support persistent topology " + "Using driver parameter defined value [%s]", + lpfc_topo_to_str[phba->cfg_topology]); + return; + } + /* FW supports persistent topology - override module parameter value */ + phba->hba_flag |= HBA_PERSISTENT_TOPO; + switch (phba->pcidev->device) { + case PCI_DEVICE_ID_LANCER_G7_FC: + case PCI_DEVICE_ID_LANCER_G6_FC: + if (!tf) { + phba->cfg_topology = ((pt == LINK_FLAGS_LOOP) + ? FLAGS_TOPOLOGY_MODE_LOOP + : FLAGS_TOPOLOGY_MODE_PT_PT); + } else { + phba->hba_flag &= ~HBA_PERSISTENT_TOPO; + } + break; + default: /* G5 */ + if (tf) { + /* If topology failover set - pt is '0' or '1' */ + phba->cfg_topology = (pt ? FLAGS_TOPOLOGY_MODE_PT_LOOP : + FLAGS_TOPOLOGY_MODE_LOOP_PT); + } else { + phba->cfg_topology = ((pt == LINK_FLAGS_P2P) + ? FLAGS_TOPOLOGY_MODE_PT_PT + : FLAGS_TOPOLOGY_MODE_LOOP); + } + break; + } + if (phba->hba_flag & HBA_PERSISTENT_TOPO) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2020 Using persistent topology value [%s]", + lpfc_topo_to_str[phba->cfg_topology]); + } else { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "2021 Invalid topology values from FW " + "Using driver parameter defined value [%s]", + lpfc_topo_to_str[phba->cfg_topology]); + } +} + /** * lpfc_sli4_read_config - Get the config parameters. * @phba: pointer to lpfc hba data structure. @@ -8307,6 +8426,10 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config); phba->sli4_hba.max_cfg_param.max_xri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config); + /* Reduce resource usage in kdump environment */ + if (is_kdump_kernel() && + phba->sli4_hba.max_cfg_param.max_xri > 512) + phba->sli4_hba.max_cfg_param.max_xri = 512; phba->sli4_hba.max_cfg_param.xri_base = bf_get(lpfc_mbx_rd_conf_xri_base, rd_config); phba->sli4_hba.max_cfg_param.max_vpi = @@ -8341,6 +8464,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ? (phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0; phba->max_vports = phba->max_vpi; + lpfc_map_topology(phba, rd_config); lpfc_printf_log(phba, KERN_INFO, LOG_SLI, "2003 cfg params Extents? 
%d " "XRI(B:%d M:%d), " @@ -8380,11 +8504,6 @@ lpfc_sli4_read_config(struct lpfc_hba *phba) */ qmin -= 4; - /* If NVME is configured, double the number of CQ/WQs needed */ - if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) && - !phba->nvmet_support) - qmin /= 2; - /* Check to see if there is enough for NVME */ if ((phba->cfg_irq_chann > qmin) || (phba->cfg_hdw_queue > qmin)) { @@ -8619,8 +8738,8 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) */ if (phba->nvmet_support) { - if (phba->cfg_irq_chann < phba->cfg_nvmet_mrq) - phba->cfg_nvmet_mrq = phba->cfg_irq_chann; + if (phba->cfg_hdw_queue < phba->cfg_nvmet_mrq) + phba->cfg_nvmet_mrq = phba->cfg_hdw_queue; if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX) phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX; } @@ -8641,51 +8760,14 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba) } static int -lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx) +lpfc_alloc_io_wq_cq(struct lpfc_hba *phba, int idx) { struct lpfc_queue *qdesc; + u32 wqesize; int cpu; - cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); - qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, - phba->sli4_hba.cq_esize, - LPFC_CQE_EXP_COUNT, cpu); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0508 Failed allocate fast-path NVME CQ (%d)\n", - wqidx); - return 1; - } - qdesc->qe_valid = 1; - qdesc->hdwq = wqidx; - qdesc->chann = cpu; - phba->sli4_hba.hdwq[wqidx].nvme_cq = qdesc; - - qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, - LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT, - cpu); - if (!qdesc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0509 Failed allocate fast-path NVME WQ (%d)\n", - wqidx); - return 1; - } - qdesc->hdwq = wqidx; - qdesc->chann = wqidx; - phba->sli4_hba.hdwq[wqidx].nvme_wq = qdesc; - list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); - return 0; -} - -static int -lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) -{ - struct lpfc_queue *qdesc; - uint32_t wqesize; - int cpu; - - cpu = lpfc_find_cpu_handle(phba, wqidx, LPFC_FIND_BY_HDWQ); - /* Create Fast Path FCP CQs */ + cpu = lpfc_find_cpu_handle(phba, idx, LPFC_FIND_BY_HDWQ); + /* Create Fast Path IO CQs */ if (phba->enab_exp_wqcq_pages) /* Increase the CQ size when WQEs contain an embedded cdb */ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE, @@ -8698,15 +8780,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) phba->sli4_hba.cq_ecount, cpu); if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx); + "0499 Failed allocate fast-path IO CQ (%d)\n", idx); return 1; } qdesc->qe_valid = 1; - qdesc->hdwq = wqidx; + qdesc->hdwq = idx; qdesc->chann = cpu; - phba->sli4_hba.hdwq[wqidx].fcp_cq = qdesc; + phba->sli4_hba.hdwq[idx].io_cq = qdesc; - /* Create Fast Path FCP WQs */ + /* Create Fast Path IO WQs */ if (phba->enab_exp_wqcq_pages) { /* Increase the WQ size when WQEs contain an embedded cdb */ wqesize = (phba->fcp_embed_io) ? 
@@ -8721,13 +8803,13 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx) if (!qdesc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "0503 Failed allocate fast-path FCP WQ (%d)\n", - wqidx); + "0503 Failed allocate fast-path IO WQ (%d)\n", + idx); return 1; } - qdesc->hdwq = wqidx; - qdesc->chann = wqidx; - phba->sli4_hba.hdwq[wqidx].fcp_wq = qdesc; + qdesc->hdwq = idx; + qdesc->chann = cpu; + phba->sli4_hba.hdwq[idx].io_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); return 0; } @@ -8791,12 +8873,13 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) qp->get_io_bufs = 0; qp->put_io_bufs = 0; qp->total_io_bufs = 0; - spin_lock_init(&qp->abts_scsi_buf_list_lock); - INIT_LIST_HEAD(&qp->lpfc_abts_scsi_buf_list); + spin_lock_init(&qp->abts_io_buf_list_lock); + INIT_LIST_HEAD(&qp->lpfc_abts_io_buf_list); qp->abts_scsi_io_bufs = 0; - spin_lock_init(&qp->abts_nvme_buf_list_lock); - INIT_LIST_HEAD(&qp->lpfc_abts_nvme_buf_list); qp->abts_nvme_io_bufs = 0; + INIT_LIST_HEAD(&qp->sgl_list); + INIT_LIST_HEAD(&qp->cmd_rsp_buf_list); + spin_lock_init(&qp->hdwq_lock); } } @@ -8862,7 +8945,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } qdesc->qe_valid = 1; qdesc->hdwq = cpup->hdwq; - qdesc->chann = cpu; /* First CPU this EQ is affinitised to */ + qdesc->chann = cpu; /* First CPU this EQ is affinitized to */ qdesc->last_cpu = qdesc->chann; /* Save the allocated EQ in the Hardware Queue */ @@ -8893,41 +8976,31 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq; } - /* Allocate SCSI SLI4 CQ/WQs */ + /* Allocate IO Path SLI4 CQ/WQs */ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { - if (lpfc_alloc_fcp_wq_cq(phba, idx)) + if (lpfc_alloc_io_wq_cq(phba, idx)) goto out_error; } - /* Allocate NVME SLI4 CQ/WQs */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { - if (lpfc_alloc_nvme_wq_cq(phba, idx)) - goto out_error; - } - - if (phba->nvmet_support) { - for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { - cpu = lpfc_find_cpu_handle(phba, idx, - LPFC_FIND_BY_HDWQ); - qdesc = lpfc_sli4_queue_alloc( - phba, + if (phba->nvmet_support) { + for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) { + cpu = lpfc_find_cpu_handle(phba, idx, + LPFC_FIND_BY_HDWQ); + qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount, cpu); - if (!qdesc) { - lpfc_printf_log( - phba, KERN_ERR, LOG_INIT, + if (!qdesc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3142 Failed allocate NVME " "CQ Set (%d)\n", idx); - goto out_error; - } - qdesc->qe_valid = 1; - qdesc->hdwq = idx; - qdesc->chann = cpu; - phba->sli4_hba.nvmet_cqset[idx] = qdesc; + goto out_error; } + qdesc->qe_valid = 1; + qdesc->hdwq = idx; + qdesc->chann = cpu; + phba->sli4_hba.nvmet_cqset[idx] = qdesc; } } @@ -8958,7 +9031,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_error; } qdesc->qe_valid = 1; - qdesc->chann = 0; + qdesc->chann = cpu; phba->sli4_hba.els_cq = qdesc; @@ -8976,7 +9049,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "0505 Failed allocate slow-path MQ\n"); goto out_error; } - qdesc->chann = 0; + qdesc->chann = cpu; phba->sli4_hba.mbx_wq = qdesc; /* @@ -8992,7 +9065,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "0504 Failed allocate slow-path ELS WQ\n"); goto out_error; } - qdesc->chann = 0; + qdesc->chann = cpu; phba->sli4_hba.els_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); @@ -9006,7 +9079,7 @@ 
lpfc_sli4_queue_create(struct lpfc_hba *phba) "6079 Failed allocate NVME LS CQ\n"); goto out_error; } - qdesc->chann = 0; + qdesc->chann = cpu; qdesc->qe_valid = 1; phba->sli4_hba.nvmels_cq = qdesc; @@ -9019,7 +9092,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) "6080 Failed allocate NVME LS WQ\n"); goto out_error; } - qdesc->chann = 0; + qdesc->chann = cpu; phba->sli4_hba.nvmels_wq = qdesc; list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list); } @@ -9101,7 +9174,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } } -#if defined(BUILD_NVME) /* Clear NVME stats */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { @@ -9109,7 +9181,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) sizeof(phba->sli4_hba.hdwq[idx].nvme_cstat)); } } -#endif /* Clear SCSI stats */ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) { @@ -9162,15 +9233,13 @@ lpfc_sli4_release_hdwq(struct lpfc_hba *phba) /* Loop thru all Hardware Queues */ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { /* Free the CQ/WQ corresponding to the Hardware Queue */ - lpfc_sli4_queue_free(hdwq[idx].fcp_cq); - lpfc_sli4_queue_free(hdwq[idx].nvme_cq); - lpfc_sli4_queue_free(hdwq[idx].fcp_wq); - lpfc_sli4_queue_free(hdwq[idx].nvme_wq); - hdwq[idx].hba_eq = NULL; - hdwq[idx].fcp_cq = NULL; - hdwq[idx].nvme_cq = NULL; - hdwq[idx].fcp_wq = NULL; - hdwq[idx].nvme_wq = NULL; + lpfc_sli4_queue_free(hdwq[idx].io_cq); + lpfc_sli4_queue_free(hdwq[idx].io_wq); + hdwq[idx].io_cq = NULL; + hdwq[idx].io_wq = NULL; + if (phba->cfg_xpsgl && !phba->nvmet_support) + lpfc_free_sgl_per_hdwq(phba, &hdwq[idx]); + lpfc_free_cmd_rsp_buf_per_hdwq(phba, &hdwq[idx]); } /* Loop thru all IRQ vectors */ for (idx = 0; idx < phba->cfg_irq_chann; idx++) { @@ -9210,6 +9279,8 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) } spin_unlock_irq(&phba->hbalock); + lpfc_sli4_cleanup_poll_list(phba); + /* Release HBA eqs */ if (phba->sli4_hba.hdwq) lpfc_sli4_release_hdwq(phba); @@ -9370,8 +9441,7 @@ lpfc_setup_cq_lookup(struct lpfc_hba *phba) list_for_each_entry(childq, &eq->child_list, list) { if (childq->queue_id > phba->sli4_hba.cq_max) continue; - if ((childq->subtype == LPFC_FCP) || - (childq->subtype == LPFC_NVME)) + if (childq->subtype == LPFC_IO) phba->sli4_hba.cq_lookup[childq->queue_id] = childq; } @@ -9497,31 +9567,6 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) } /* Loop thru all Hardware Queues */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { - cpu = lpfc_find_cpu_handle(phba, qidx, - LPFC_FIND_BY_HDWQ); - cpup = &phba->sli4_hba.cpu_map[cpu]; - - /* Create the CQ/WQ corresponding to the - * Hardware Queue - */ - rc = lpfc_create_wq_cq(phba, - phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, - qp[qidx].nvme_cq, - qp[qidx].nvme_wq, - &phba->sli4_hba.hdwq[qidx].nvme_cq_map, - qidx, LPFC_NVME); - if (rc) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6123 Failed to setup fastpath " - "NVME WQ/CQ (%d), rc = 0x%x\n", - qidx, (uint32_t)rc); - goto out_destroy; - } - } - } - for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ); cpup = &phba->sli4_hba.cpu_map[cpu]; @@ -9529,14 +9574,15 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) /* Create the CQ/WQ corresponding to the Hardware Queue */ rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hdwq[cpup->hdwq].hba_eq, - qp[qidx].fcp_cq, - qp[qidx].fcp_wq, - &phba->sli4_hba.hdwq[qidx].fcp_cq_map, - qidx, LPFC_FCP); + qp[qidx].io_cq, + qp[qidx].io_wq, + 
&phba->sli4_hba.hdwq[qidx].io_cq_map, + qidx, + LPFC_IO); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0535 Failed to setup fastpath " - "FCP WQ/CQ (%d), rc = 0x%x\n", + "IO WQ/CQ (%d), rc = 0x%x\n", qidx, (uint32_t)rc); goto out_destroy; } @@ -9836,10 +9882,8 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { /* Destroy the CQ/WQ corresponding to Hardware Queue */ qp = &phba->sli4_hba.hdwq[qidx]; - lpfc_wq_destroy(phba, qp->fcp_wq); - lpfc_wq_destroy(phba, qp->nvme_wq); - lpfc_cq_destroy(phba, qp->fcp_cq); - lpfc_cq_destroy(phba, qp->nvme_cq); + lpfc_wq_destroy(phba, qp->io_wq); + lpfc_cq_destroy(phba, qp->io_cq); } /* Loop thru all IRQ vectors */ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) { @@ -10397,6 +10441,8 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba) case LPFC_SLI_INTF_IF_TYPE_6: iounmap(phba->sli4_hba.drbl_regs_memmap_p); iounmap(phba->sli4_hba.conf_regs_memmap_p); + if (phba->sli4_hba.dpp_regs_memmap_p) + iounmap(phba->sli4_hba.dpp_regs_memmap_p); break; case LPFC_SLI_INTF_IF_TYPE_1: default: @@ -10658,7 +10704,6 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match) */ if ((match == LPFC_FIND_BY_EQ) && (cpup->flag & LPFC_CPU_FIRST_IRQ) && - (cpup->irq != LPFC_VECTOR_MAP_EMPTY) && (cpup->eq == id)) return cpu; @@ -10696,6 +10741,75 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu, } #endif +/* + * lpfc_assign_eq_map_info - Assigns eq for vector_map structure + * @phba: pointer to lpfc hba data structure. + * @eqidx: index for eq and irq vector + * @flag: flags to set for vector_map structure + * @cpu: cpu used to index vector_map structure + * + * The routine assigns eq info into vector_map structure + */ +static inline void +lpfc_assign_eq_map_info(struct lpfc_hba *phba, uint16_t eqidx, uint16_t flag, + unsigned int cpu) +{ + struct lpfc_vector_map_info *cpup = &phba->sli4_hba.cpu_map[cpu]; + struct lpfc_hba_eq_hdl *eqhdl = lpfc_get_eq_hdl(eqidx); + + cpup->eq = eqidx; + cpup->flag |= flag; + + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3336 Set Affinity: CPU %d irq %d eq %d flag x%x\n", + cpu, eqhdl->irq, cpup->eq, cpup->flag); +} + +/** + * lpfc_cpu_map_array_init - Initialize cpu_map structure + * @phba: pointer to lpfc hba data structure. + * + * The routine initializes the cpu_map array structure + */ +static void +lpfc_cpu_map_array_init(struct lpfc_hba *phba) +{ + struct lpfc_vector_map_info *cpup; + struct lpfc_eq_intr_info *eqi; + int cpu; + + for_each_possible_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; + cpup->core_id = LPFC_VECTOR_MAP_EMPTY; + cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; + cpup->eq = LPFC_VECTOR_MAP_EMPTY; + cpup->flag = 0; + eqi = per_cpu_ptr(phba->sli4_hba.eq_info, cpu); + INIT_LIST_HEAD(&eqi->list); + eqi->icnt = 0; + } +} + +/** + * lpfc_hba_eq_hdl_array_init - Initialize hba_eq_hdl structure + * @phba: pointer to lpfc hba data structure. + * + * The routine initializes the hba_eq_hdl array structure + */ +static void +lpfc_hba_eq_hdl_array_init(struct lpfc_hba *phba) +{ + struct lpfc_hba_eq_hdl *eqhdl; + int i; + + for (i = 0; i < phba->cfg_irq_chann; i++) { + eqhdl = lpfc_get_eq_hdl(i); + eqhdl->irq = LPFC_VECTOR_MAP_EMPTY; + eqhdl->phba = phba; + } +} + /** * lpfc_cpu_affinity_check - Check vector CPU affinity mappings * @phba: pointer to lpfc hba data structure. 
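lpfc_cpu_map_array_init() above re-initializes, among other things, the driver's per-CPU EQ interrupt accounting (sli4_hba.eq_info, a percpu allocation made elsewhere in the driver). A generic sketch of that allocate-then-initialize percpu pattern, with illustrative type and variable names:

#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/types.h>

/* Illustrative stand-in for the driver's per-CPU EQ bookkeeping. */
struct eq_intr_info {
	struct list_head list;
	u32 icnt;
};

static struct eq_intr_info __percpu *eq_info;

static int eq_info_setup(void)
{
	unsigned int cpu;

	eq_info = alloc_percpu(struct eq_intr_info);
	if (!eq_info)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct eq_intr_info *eqi = per_cpu_ptr(eq_info, cpu);

		INIT_LIST_HEAD(&eqi->list);
		eqi->icnt = 0;
	}
	return 0;	/* teardown: free_percpu(eq_info) */
}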
@@ -10709,27 +10823,15 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu, static void lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) { - int i, cpu, idx, new_cpu, start_cpu, first_cpu; + int i, cpu, idx, next_idx, new_cpu, start_cpu, first_cpu; int max_phys_id, min_phys_id; int max_core_id, min_core_id; struct lpfc_vector_map_info *cpup; struct lpfc_vector_map_info *new_cpup; - const struct cpumask *maskp; #ifdef CONFIG_X86 struct cpuinfo_x86 *cpuinfo; #endif - /* Init cpu_map array */ - for_each_possible_cpu(cpu) { - cpup = &phba->sli4_hba.cpu_map[cpu]; - cpup->phys_id = LPFC_VECTOR_MAP_EMPTY; - cpup->core_id = LPFC_VECTOR_MAP_EMPTY; - cpup->hdwq = LPFC_VECTOR_MAP_EMPTY; - cpup->eq = LPFC_VECTOR_MAP_EMPTY; - cpup->irq = LPFC_VECTOR_MAP_EMPTY; - cpup->flag = 0; - } - max_phys_id = 0; min_phys_id = LPFC_VECTOR_MAP_EMPTY; max_core_id = 0; @@ -10751,8 +10853,8 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) #endif lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3328 CPU physid %d coreid %d\n", - cpup->phys_id, cpup->core_id); + "3328 CPU %d physid %d coreid %d flag x%x\n", + cpu, cpup->phys_id, cpup->core_id, cpup->flag); if (cpup->phys_id > max_phys_id) max_phys_id = cpup->phys_id; @@ -10765,65 +10867,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) min_core_id = cpup->core_id; } - for_each_possible_cpu(i) { - struct lpfc_eq_intr_info *eqi = - per_cpu_ptr(phba->sli4_hba.eq_info, i); - - INIT_LIST_HEAD(&eqi->list); - eqi->icnt = 0; - } - - /* This loop sets up all CPUs that are affinitized with a - * irq vector assigned to the driver. All affinitized CPUs - * will get a link to that vectors IRQ and EQ. - * - * NULL affinity mask handling: - * If irq count is greater than one, log an error message. - * If the null mask is received for the first irq, find the - * first present cpu, and assign the eq index to ensure at - * least one EQ is assigned. - */ - for (idx = 0; idx < phba->cfg_irq_chann; idx++) { - /* Get a CPU mask for all CPUs affinitized to this vector */ - maskp = pci_irq_get_affinity(phba->pcidev, idx); - if (!maskp) { - if (phba->cfg_irq_chann > 1) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3329 No affinity mask found " - "for vector %d (%d)\n", - idx, phba->cfg_irq_chann); - if (!idx) { - cpu = cpumask_first(cpu_present_mask); - cpup = &phba->sli4_hba.cpu_map[cpu]; - cpup->eq = idx; - cpup->irq = pci_irq_vector(phba->pcidev, idx); - cpup->flag |= LPFC_CPU_FIRST_IRQ; - } - break; - } - - i = 0; - /* Loop through all CPUs associated with vector idx */ - for_each_cpu_and(cpu, maskp, cpu_present_mask) { - /* Set the EQ index and IRQ for that vector */ - cpup = &phba->sli4_hba.cpu_map[cpu]; - cpup->eq = idx; - cpup->irq = pci_irq_vector(phba->pcidev, idx); - - lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "3336 Set Affinity: CPU %d " - "irq %d eq %d\n", - cpu, cpup->irq, cpup->eq); - - /* If this is the first CPU thats assigned to this - * vector, set LPFC_CPU_FIRST_IRQ. - */ - if (!i) - cpup->flag |= LPFC_CPU_FIRST_IRQ; - i++; - } - } - /* After looking at each irq vector assigned to this pcidev, its * possible to see that not ALL CPUs have been accounted for. 
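The hunk above deletes the per-vector affinity walk from lpfc_cpu_affinity_check(); the same pci_irq_get_affinity()-based query reappears in lpfc_sli4_enable_msix() later in this patch. Sketched generically (the dev_info() reporting is illustrative, not driver output):

#include <linux/cpumask.h>
#include <linux/pci.h>

/* Report which present CPUs each allocated vector is bound to. */
static void show_vector_affinity(struct pci_dev *pdev, int nvec)
{
	const struct cpumask *maskp;
	unsigned int cpu;
	int idx;

	for (idx = 0; idx < nvec; idx++) {
		maskp = pci_irq_get_affinity(pdev, idx);
		if (!maskp)	/* vector has no affinity mask */
			continue;
		for_each_cpu_and(cpu, maskp, cpu_present_mask)
			dev_info(&pdev->dev, "vector %d (irq %d) -> CPU %u\n",
				 idx, pci_irq_vector(pdev, idx), cpu);
	}
}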
* Next we will set any unassigned (unaffinitized) cpu map @@ -10849,7 +10892,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && - (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) && + (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) && (new_cpup->phys_id == cpup->phys_id)) goto found_same; new_cpu = cpumask_next( @@ -10862,7 +10905,6 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors) found_same: /* We found a matching phys_id, so copy the IRQ info */ cpup->eq = new_cpup->eq; - cpup->irq = new_cpup->irq; /* Bump start_cpu to the next slot to minmize the * chance of having multiple unassigned CPU entries @@ -10874,9 +10916,10 @@ found_same: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3337 Set Affinity: CPU %d " - "irq %d from id %d same " + "eq %d from peer cpu %d same " "phys_id (%d)\n", - cpu, cpup->irq, new_cpu, cpup->phys_id); + cpu, cpup->eq, new_cpu, + cpup->phys_id); } } @@ -10900,7 +10943,7 @@ found_same: for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) && - (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY)) + (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY)) goto found_any; new_cpu = cpumask_next( new_cpu, cpu_present_mask); @@ -10910,13 +10953,12 @@ found_same: /* We should never leave an entry unassigned */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3339 Set Affinity: CPU %d " - "irq %d UNASSIGNED\n", - cpup->hdwq, cpup->irq); + "eq %d UNASSIGNED\n", + cpup->hdwq, cpup->eq); continue; found_any: /* We found an available entry, copy the IRQ info */ cpup->eq = new_cpup->eq; - cpup->irq = new_cpup->irq; /* Bump start_cpu to the next slot to minmize the * chance of having multiple unassigned CPU entries @@ -10928,75 +10970,126 @@ found_any: lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "3338 Set Affinity: CPU %d " - "irq %d from id %d (%d/%d)\n", - cpu, cpup->irq, new_cpu, + "eq %d from peer cpu %d (%d/%d)\n", + cpu, cpup->eq, new_cpu, new_cpup->phys_id, new_cpup->core_id); } } - /* Finally we need to associate a hdwq with each cpu_map entry + /* Assign hdwq indices that are unique across all cpus in the map + * that are also FIRST_CPUs. + */ + idx = 0; + for_each_present_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + + /* Only FIRST IRQs get a hdwq index assignment. */ + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) + continue; + + /* 1 to 1, the first LPFC_CPU_FIRST_IRQ cpus to a unique hdwq */ + cpup->hdwq = idx; + idx++; + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3333 Set Affinity: CPU %d (phys %d core %d): " + "hdwq %d eq %d flg x%x\n", + cpu, cpup->phys_id, cpup->core_id, + cpup->hdwq, cpup->eq, cpup->flag); + } + /* Associate a hdwq with each cpu_map entry * This will be 1 to 1 - hdwq to cpu, unless there are less * hardware queues then CPUs. For that case we will just round-robin * the available hardware queues as they get assigned to CPUs. + * The next_idx is the idx from the FIRST_CPU loop above to account + * for irq_chann < hdwq. The idx is used for round-robin assignments + * and needs to start at 0. */ - idx = 0; + next_idx = idx; start_cpu = 0; + idx = 0; for_each_present_cpu(cpu) { cpup = &phba->sli4_hba.cpu_map[cpu]; - if (idx >= phba->cfg_hdw_queue) { - /* We need to reuse a Hardware Queue for another CPU, - * so be smart about it and pick one that has its - * IRQ/EQ mapped to the same phys_id (CPU package). - * and core_id. 
- */ - new_cpu = start_cpu; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; - if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) && - (new_cpup->phys_id == cpup->phys_id) && - (new_cpup->core_id == cpup->core_id)) - goto found_hdwq; - new_cpu = cpumask_next( - new_cpu, cpu_present_mask); - if (new_cpu == nr_cpumask_bits) - new_cpu = first_cpu; - } - /* If we can't match both phys_id and core_id, - * settle for just a phys_id match. - */ - new_cpu = start_cpu; - for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { - new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; - if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) && - (new_cpup->phys_id == cpup->phys_id)) - goto found_hdwq; - new_cpu = cpumask_next( - new_cpu, cpu_present_mask); - if (new_cpu == nr_cpumask_bits) - new_cpu = first_cpu; + /* FIRST cpus are already mapped. */ + if (cpup->flag & LPFC_CPU_FIRST_IRQ) + continue; + + /* If the cfg_irq_chann < cfg_hdw_queue, set the hdwq + * of the unassigned cpus to the next idx so that all + * hdw queues are fully utilized. + */ + if (next_idx < phba->cfg_hdw_queue) { + cpup->hdwq = next_idx; + next_idx++; + continue; + } + + /* Not a First CPU and all hdw_queues are used. Reuse a + * Hardware Queue for another CPU, so be smart about it + * and pick one that has its IRQ/EQ mapped to the same phys_id + * (CPU package) and core_id. + */ + new_cpu = start_cpu; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; + if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && + new_cpup->phys_id == cpup->phys_id && + new_cpup->core_id == cpup->core_id) { + goto found_hdwq; } + new_cpu = cpumask_next(new_cpu, cpu_present_mask); + if (new_cpu == nr_cpumask_bits) + new_cpu = first_cpu; + } - /* Otherwise just round robin on cfg_hdw_queue */ - cpup->hdwq = idx % phba->cfg_hdw_queue; - goto logit; -found_hdwq: - /* We found an available entry, copy the IRQ info */ - start_cpu = cpumask_next(new_cpu, cpu_present_mask); - if (start_cpu == nr_cpumask_bits) - start_cpu = first_cpu; - cpup->hdwq = new_cpup->hdwq; - } else { - /* 1 to 1, CPU to hdwq */ - cpup->hdwq = idx; + /* If we can't match both phys_id and core_id, + * settle for just a phys_id match. + */ + new_cpu = start_cpu; + for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { + new_cpup = &phba->sli4_hba.cpu_map[new_cpu]; + if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY && + new_cpup->phys_id == cpup->phys_id) + goto found_hdwq; + + new_cpu = cpumask_next(new_cpu, cpu_present_mask); + if (new_cpu == nr_cpumask_bits) + new_cpu = first_cpu; } -logit: + + /* Otherwise just round robin on cfg_hdw_queue */ + cpup->hdwq = idx % phba->cfg_hdw_queue; + idx++; + goto logit; + found_hdwq: + /* We found an available entry, copy the IRQ info */ + start_cpu = cpumask_next(new_cpu, cpu_present_mask); + if (start_cpu == nr_cpumask_bits) + start_cpu = first_cpu; + cpup->hdwq = new_cpup->hdwq; + logit: lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3335 Set Affinity: CPU %d (phys %d core %d): " - "hdwq %d eq %d irq %d flg x%x\n", + "hdwq %d eq %d flg x%x\n", cpu, cpup->phys_id, cpup->core_id, - cpup->hdwq, cpup->eq, cpup->irq, cpup->flag); - idx++; + cpup->hdwq, cpup->eq, cpup->flag); + } + + /* + * Initialize the cpu_map slots for not-present cpus in case + * a cpu is hot-added. Perform a simple hdwq round robin assignment. 
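The reworked mapping above falls back in stages: reuse a hdwq from a CPU on the same package and core, then from the same package only, then plain round robin over cfg_hdw_queue. A simplified sketch of that ladder (the map layout here is a stand-in, not the driver's struct lpfc_vector_map_info):

#include <linux/cpumask.h>
#include <linux/types.h>

#define MAP_EMPTY 0xffff

/* Illustrative stand-in for one cpu_map entry. */
struct cpu_map_ent {
	u16 phys_id;
	u16 core_id;
	u16 hdwq;
};

static u16 pick_hdwq(struct cpu_map_ent *map, unsigned int cpu,
		     unsigned int nr_hdwq, unsigned int *rr)
{
	unsigned int c;

	/* Pass 1: borrow from a CPU sharing package and core. */
	for_each_present_cpu(c)
		if (map[c].hdwq != MAP_EMPTY &&
		    map[c].phys_id == map[cpu].phys_id &&
		    map[c].core_id == map[cpu].core_id)
			return map[c].hdwq;

	/* Pass 2: settle for the same package. */
	for_each_present_cpu(c)
		if (map[c].hdwq != MAP_EMPTY &&
		    map[c].phys_id == map[cpu].phys_id)
			return map[c].hdwq;

	/* Pass 3: plain round robin over the hardware queues. */
	return (*rr)++ % nr_hdwq;
}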
+ */ + idx = 0; + for_each_possible_cpu(cpu) { + cpup = &phba->sli4_hba.cpu_map[cpu]; + if (cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) + continue; + + cpup->hdwq = idx++ % phba->cfg_hdw_queue; + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, + "3340 Set Affinity: not present " + "CPU %d hdwq %d\n", + cpu, cpup->hdwq); } /* The cpu_map array will be used later during initialization @@ -11006,11 +11099,280 @@ logit: } /** + * lpfc_cpuhp_get_eq + * + * @phba: pointer to lpfc hba data structure. + * @cpu: cpu going offline + * @eqlist: list to append eqs that require polling + */ +static void +lpfc_cpuhp_get_eq(struct lpfc_hba *phba, unsigned int cpu, + struct list_head *eqlist) +{ + const struct cpumask *maskp; + struct lpfc_queue *eq; + cpumask_t tmp; + u16 idx; + + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { + maskp = pci_irq_get_affinity(phba->pcidev, idx); + if (!maskp) + continue; + /* + * if irq is not affinitized to the cpu going + * offline then we don't need to poll the eq attached + * to it. + */ + if (!cpumask_and(&tmp, maskp, cpumask_of(cpu))) + continue; + /* get the cpus that are online and are affini- + * tized to this irq vector. If the count is + * more than 1 then cpuhp is not going to shut- + * down this vector. Since this cpu has not + * gone offline yet, we need >1. + */ + cpumask_and(&tmp, maskp, cpu_online_mask); + if (cpumask_weight(&tmp) > 1) + continue; + + /* Now that we have an irq to shutdown, get the eq + * mapped to this irq. Note: multiple hdwq's in + * the software can share an eq, but eventually + * only one eq will be mapped to this vector + */ + eq = phba->sli4_hba.hba_eq_hdl[idx].eq; + list_add(&eq->_poll_list, eqlist); + } +} + +static void __lpfc_cpuhp_remove(struct lpfc_hba *phba) +{ + if (phba->sli_rev != LPFC_SLI_REV4) + return; + + cpuhp_state_remove_instance_nocalls(lpfc_cpuhp_state, + &phba->cpuhp); + /* + * unregistering the instance doesn't stop the polling + * timer. Wait for the poll timer to retire.
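__lpfc_cpuhp_remove() above is the teardown half of the dynamic CPU-hotplug multi-state pattern; the matching cpuhp_setup_state_multi() and add-instance calls appear in lpfc_init() and the probe path later in this patch. The pattern in isolation, with hypothetical names and empty callbacks:

#include <linux/cpuhotplug.h>
#include <linux/list.h>

struct my_hba {			/* hypothetical per-adapter object */
	struct hlist_node cpuhp;
};

static enum cpuhp_state my_hp_state;

static int my_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	return 0;		/* resume per-device work on @cpu */
}

static int my_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
	return 0;		/* migrate per-device work off @cpu */
}

static int __init my_driver_init(void)
{
	int ret;

	/* One dynamic state for the whole driver... */
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "my/drv:online",
				      my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;
	my_hp_state = ret;
	return 0;
}

/* ...then one instance per device. */
static void my_hba_hotplug_add(struct my_hba *hba)
{
	cpuhp_state_add_instance_nocalls(my_hp_state, &hba->cpuhp);
}

static void my_hba_hotplug_remove(struct my_hba *hba)
{
	cpuhp_state_remove_instance_nocalls(my_hp_state, &hba->cpuhp);
}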
+ */ + synchronize_rcu(); + del_timer_sync(&phba->cpuhp_poll_timer); +} + +static void lpfc_cpuhp_remove(struct lpfc_hba *phba) +{ + if (phba->pport->fc_flag & FC_OFFLINE_MODE) + return; + + __lpfc_cpuhp_remove(phba); +} + +static void lpfc_cpuhp_add(struct lpfc_hba *phba) +{ + if (phba->sli_rev != LPFC_SLI_REV4) + return; + + rcu_read_lock(); + + if (!list_empty(&phba->poll_list)) { + timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + } + + rcu_read_unlock(); + + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, + &phba->cpuhp); +} + +static int __lpfc_cpuhp_checks(struct lpfc_hba *phba, int *retval) +{ + if (phba->pport->load_flag & FC_UNLOADING) { + *retval = -EAGAIN; + return true; + } + + if (phba->sli_rev != LPFC_SLI_REV4) { + *retval = 0; + return true; + } + + /* proceed with the hotplug */ + return false; +} + +/** + * lpfc_irq_set_aff - set IRQ affinity + * @eqhdl: EQ handle + * @cpu: cpu to set affinity + * + **/ +static inline void +lpfc_irq_set_aff(struct lpfc_hba_eq_hdl *eqhdl, unsigned int cpu) +{ + cpumask_clear(&eqhdl->aff_mask); + cpumask_set_cpu(cpu, &eqhdl->aff_mask); + irq_set_status_flags(eqhdl->irq, IRQ_NO_BALANCING); + irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); +} + +/** + * lpfc_irq_clear_aff - clear IRQ affinity + * @eqhdl: EQ handle + * + **/ +static inline void +lpfc_irq_clear_aff(struct lpfc_hba_eq_hdl *eqhdl) +{ + cpumask_clear(&eqhdl->aff_mask); + irq_clear_status_flags(eqhdl->irq, IRQ_NO_BALANCING); + irq_set_affinity_hint(eqhdl->irq, &eqhdl->aff_mask); +} + +/** + * lpfc_irq_rebalance - rebalances IRQ affinity according to cpuhp event + * @phba: pointer to HBA context object. + * @cpu: cpu going offline/online + * @offline: true, cpu is going offline. false, cpu is coming online. + * + * If cpu is going offline, we'll try our best effort to find the next + * online cpu on the phba's NUMA node and migrate all offlining IRQ affinities. + * + * If cpu is coming online, reaffinitize the IRQ back to the onlining cpu. + * + * Note: Call only if cfg_irq_numa is enabled, otherwise rely on + * PCI_IRQ_AFFINITY to auto-manage IRQ affinity.
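lpfc_irq_set_aff()/lpfc_irq_clear_aff() above wrap a common manual-pinning idiom: mark the IRQ as exempt from balancing, then publish an affinity hint. The idiom on its own (the irq number and mask storage belong to the caller):

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

/* Pin @irq to @cpu and keep irqbalance away from it. */
static void irq_pin_to_cpu(unsigned int irq, unsigned int cpu,
			   struct cpumask *mask)
{
	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);
	irq_set_status_flags(irq, IRQ_NO_BALANCING);
	irq_set_affinity_hint(irq, mask);
}

/* Undo the pin; publishing an empty mask withdraws the hint. */
static void irq_unpin(unsigned int irq, struct cpumask *mask)
{
	cpumask_clear(mask);
	irq_clear_status_flags(irq, IRQ_NO_BALANCING);
	irq_set_affinity_hint(irq, mask);
}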
+ * + **/ +static void +lpfc_irq_rebalance(struct lpfc_hba *phba, unsigned int cpu, bool offline) +{ + struct lpfc_vector_map_info *cpup; + struct cpumask *aff_mask; + unsigned int cpu_select, cpu_next, idx; + const struct cpumask *numa_mask; + + if (!phba->cfg_irq_numa) + return; + + numa_mask = &phba->sli4_hba.numa_mask; + + if (!cpumask_test_cpu(cpu, numa_mask)) + return; + + cpup = &phba->sli4_hba.cpu_map[cpu]; + + if (!(cpup->flag & LPFC_CPU_FIRST_IRQ)) + return; + + if (offline) { + /* Find next online CPU on NUMA node */ + cpu_next = cpumask_next_wrap(cpu, numa_mask, cpu, true); + cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu_next); + + /* Found a valid CPU */ + if ((cpu_select < nr_cpu_ids) && (cpu_select != cpu)) { + /* Go through each eqhdl and ensure offlining + * cpu aff_mask is migrated + */ + for (idx = 0; idx < phba->cfg_irq_chann; idx++) { + aff_mask = lpfc_get_aff_mask(idx); + + /* Migrate affinity */ + if (cpumask_test_cpu(cpu, aff_mask)) + lpfc_irq_set_aff(lpfc_get_eq_hdl(idx), + cpu_select); + } + } else { + /* Rely on irqbalance if no online CPUs left on NUMA */ + for (idx = 0; idx < phba->cfg_irq_chann; idx++) + lpfc_irq_clear_aff(lpfc_get_eq_hdl(idx)); + } + } else { + /* Migrate affinity back to this CPU */ + lpfc_irq_set_aff(lpfc_get_eq_hdl(cpup->eq), cpu); + } +} + +static int lpfc_cpu_offline(unsigned int cpu, struct hlist_node *node) +{ + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); + struct lpfc_queue *eq, *next; + LIST_HEAD(eqlist); + int retval; + + if (!phba) { + WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); + return 0; + } + + if (__lpfc_cpuhp_checks(phba, &retval)) + return retval; + + lpfc_irq_rebalance(phba, cpu, true); + + lpfc_cpuhp_get_eq(phba, cpu, &eqlist); + + /* start polling on these eq's */ + list_for_each_entry_safe(eq, next, &eqlist, _poll_list) { + list_del_init(&eq->_poll_list); + lpfc_sli4_start_polling(eq); + } + + return 0; +} + +static int lpfc_cpu_online(unsigned int cpu, struct hlist_node *node) +{ + struct lpfc_hba *phba = hlist_entry_safe(node, struct lpfc_hba, cpuhp); + struct lpfc_queue *eq, *next; + unsigned int n; + int retval; + + if (!phba) { + WARN_ONCE(!phba, "cpu: %u. phba:NULL", raw_smp_processor_id()); + return 0; + } + + if (__lpfc_cpuhp_checks(phba, &retval)) + return retval; + + lpfc_irq_rebalance(phba, cpu, false); + + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) { + n = lpfc_find_cpu_handle(phba, eq->hdwq, LPFC_FIND_BY_HDWQ); + if (n == cpu) + lpfc_sli4_stop_polling(eq); + } + + return 0; +} + +/** * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI-X interrupt vectors to device - * with SLI-4 interface spec. + * with SLI-4 interface spec. It also allocates MSI-X vectors and maps them + * to cpus on the system. + * + * When cfg_irq_numa is enabled, the adapter will only allocate vectors for + * the number of cpus on the same numa node as this adapter. The vectors are + * allocated without requesting OS affinity mapping. A vector will be + * allocated and assigned to each online and offline cpu. If the cpu is + * online, then affinity will be set to that cpu. If the cpu is offline, then + * affinity will be set to the nearest peer cpu within the numa node that is + * online. If there are no online cpus within the numa node, affinity is not + * assigned and the OS may do as it pleases. 
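To make the two allocation strategies described in this kerneldoc concrete, a hedged sketch of MSI-X vector allocation with and without kernel-managed affinity follows; the counts and helper name are illustrative.

#include <linux/pci.h>

static int alloc_msix(struct pci_dev *pdev, int want, bool self_managed)
{
	unsigned int flags = PCI_IRQ_MSIX;

	/* Kernel-managed mode: the PCI core spreads the vectors and
	 * maintains their affinity across CPU hotplug.
	 */
	if (!self_managed)
		flags |= PCI_IRQ_AFFINITY;

	/* May grant fewer than @want vectors; callers must re-check. */
	return pci_alloc_irq_vectors(pdev, 1, want, flags);
}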
Note: cpu vector affinity mapping + * is consistent with the way cpu online/offline is handled when cfg_irq_numa is + * configured. + * + * If numa mode is not enabled and there is more than 1 vector allocated, then + * the driver relies on the managed irq interface where the OS assigns vector to + * cpu affinity. The driver will then use that affinity mapping to setup its + * cpu mapping table. * * Return codes * 0 - successful @@ -11021,13 +11383,31 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) { int vectors, rc, index; char *name; + const struct cpumask *numa_mask = NULL; + unsigned int cpu = 0, cpu_cnt = 0, cpu_select = nr_cpu_ids; + struct lpfc_hba_eq_hdl *eqhdl; + const struct cpumask *maskp; + bool first; + unsigned int flags = PCI_IRQ_MSIX; /* Set up MSI-X multi-message vectors */ vectors = phba->cfg_irq_chann; - rc = pci_alloc_irq_vectors(phba->pcidev, - 1, - vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); + if (phba->cfg_irq_numa) { + numa_mask = &phba->sli4_hba.numa_mask; + cpu_cnt = cpumask_weight(numa_mask); + vectors = min(phba->cfg_irq_chann, cpu_cnt); + + /* cpu: iterates over numa_mask including offline or online + * cpu_select: iterates over online numa_mask to set affinity + */ + cpu = cpumask_first(numa_mask); + cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu); + } else { + flags |= PCI_IRQ_AFFINITY; + } + + rc = pci_alloc_irq_vectors(phba->pcidev, 1, vectors, flags); if (rc < 0) { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0484 PCI enable MSI-X failed (%d)\n", rc); @@ -11037,23 +11417,61 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) /* Assign MSI-X vectors to interrupt handlers */ for (index = 0; index < vectors; index++) { - name = phba->sli4_hba.hba_eq_hdl[index].handler_name; + eqhdl = lpfc_get_eq_hdl(index); + name = eqhdl->handler_name; memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ); snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ, LPFC_DRIVER_HANDLER_NAME"%d", index); - phba->sli4_hba.hba_eq_hdl[index].idx = index; - phba->sli4_hba.hba_eq_hdl[index].phba = phba; + eqhdl->idx = index; rc = request_irq(pci_irq_vector(phba->pcidev, index), &lpfc_sli4_hba_intr_handler, 0, - name, - &phba->sli4_hba.hba_eq_hdl[index]); + name, eqhdl); if (rc) { lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0486 MSI-X fast-path (%d) " "request_irq failed (%d)\n", index, rc); goto cfg_fail_out; } + + eqhdl->irq = pci_irq_vector(phba->pcidev, index); + + if (phba->cfg_irq_numa) { + /* If found a neighboring online cpu, set affinity */ + if (cpu_select < nr_cpu_ids) + lpfc_irq_set_aff(eqhdl, cpu_select); + + /* Assign EQ to cpu_map */ + lpfc_assign_eq_map_info(phba, index, + LPFC_CPU_FIRST_IRQ, + cpu); + + /* Iterate to next offline or online cpu in numa_mask */ + cpu = cpumask_next(cpu, numa_mask); + + /* Find next online cpu in numa_mask to set affinity */ + cpu_select = lpfc_next_online_numa_cpu(numa_mask, cpu); + } else if (vectors == 1) { + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, index, LPFC_CPU_FIRST_IRQ, + cpu); + } else { + maskp = pci_irq_get_affinity(phba->pcidev, index); + + first = true; + /* Loop through all CPUs associated with vector index */ + for_each_cpu_and(cpu, maskp, cpu_present_mask) { + /* If this is the first CPU that's assigned to + * this vector, set LPFC_CPU_FIRST_IRQ. + */ + lpfc_assign_eq_map_info(phba, index, + first ?
+ LPFC_CPU_FIRST_IRQ : 0, + cpu); + if (first) + first = false; + } + } } if (vectors != phba->cfg_irq_chann) { @@ -11063,17 +11481,18 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba) phba->cfg_irq_chann, vectors); if (phba->cfg_irq_chann > vectors) phba->cfg_irq_chann = vectors; - if (phba->nvmet_support && (phba->cfg_nvmet_mrq > vectors)) - phba->cfg_nvmet_mrq = vectors; } return rc; cfg_fail_out: /* free the irq already requested */ - for (--index; index >= 0; index--) - free_irq(pci_irq_vector(phba->pcidev, index), - &phba->sli4_hba.hba_eq_hdl[index]); + for (--index; index >= 0; index--) { + eqhdl = lpfc_get_eq_hdl(index); + lpfc_irq_clear_aff(eqhdl); + irq_set_affinity_hint(eqhdl->irq, NULL); + free_irq(eqhdl->irq, eqhdl); + } /* Unconfigure MSI-X capability structure */ pci_free_irq_vectors(phba->pcidev); @@ -11087,10 +11506,10 @@ vec_fail_out: * @phba: pointer to lpfc hba data structure. * * This routine is invoked to enable the MSI interrupt mode to device with - * SLI-4 interface spec. The kernel function pci_enable_msi() is called - * to enable the MSI vector. The device driver is responsible for calling - * the request_irq() to register MSI vector with a interrupt the handler, - * which is done in this function. + * SLI-4 interface spec. The kernel function pci_alloc_irq_vectors() is + * called to enable the MSI vector. The device driver is responsible for + * calling the request_irq() to register the MSI vector with an + * interrupt handler, which is done in this function. * * Return codes * 0 - successful @@ -11100,29 +11519,38 @@ static int lpfc_sli4_enable_msi(struct lpfc_hba *phba) { int rc, index; + unsigned int cpu; + struct lpfc_hba_eq_hdl *eqhdl; - rc = pci_enable_msi(phba->pcidev); - if (!rc) + rc = pci_alloc_irq_vectors(phba->pcidev, 1, 1, + PCI_IRQ_MSI | PCI_IRQ_AFFINITY); + if (rc > 0) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0487 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0488 PCI enable MSI mode failed (%d)\n", rc); - return rc; + return rc ?
rc : -1; } rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler, 0, LPFC_DRIVER_NAME, phba); if (rc) { - pci_disable_msi(phba->pcidev); + pci_free_irq_vectors(phba->pcidev); lpfc_printf_log(phba, KERN_WARNING, LOG_INIT, "0490 MSI request_irq failed (%d)\n", rc); return rc; } + eqhdl = lpfc_get_eq_hdl(0); + eqhdl->irq = pci_irq_vector(phba->pcidev, 0); + + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, cpu); + for (index = 0; index < phba->cfg_irq_chann; index++) { - phba->sli4_hba.hba_eq_hdl[index].idx = index; - phba->sli4_hba.hba_eq_hdl[index].phba = phba; + eqhdl = lpfc_get_eq_hdl(index); + eqhdl->idx = index; } return 0; @@ -11180,15 +11608,21 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode) IRQF_SHARED, LPFC_DRIVER_NAME, phba); if (!retval) { struct lpfc_hba_eq_hdl *eqhdl; + unsigned int cpu; /* Indicate initialization to INTx mode */ phba->intr_type = INTx; intr_mode = 0; + eqhdl = lpfc_get_eq_hdl(0); + eqhdl->irq = pci_irq_vector(phba->pcidev, 0); + + cpu = cpumask_first(cpu_present_mask); + lpfc_assign_eq_map_info(phba, 0, LPFC_CPU_FIRST_IRQ, + cpu); for (idx = 0; idx < phba->cfg_irq_chann; idx++) { - eqhdl = &phba->sli4_hba.hba_eq_hdl[idx]; + eqhdl = lpfc_get_eq_hdl(idx); eqhdl->idx = idx; - eqhdl->phba = phba; } } } @@ -11210,14 +11644,14 @@ lpfc_sli4_disable_intr(struct lpfc_hba *phba) /* Disable the currently initialized interrupt mode */ if (phba->intr_type == MSIX) { int index; + struct lpfc_hba_eq_hdl *eqhdl; /* Free up MSI-X multi-message vectors */ for (index = 0; index < phba->cfg_irq_chann; index++) { - irq_set_affinity_hint( - pci_irq_vector(phba->pcidev, index), - NULL); - free_irq(pci_irq_vector(phba->pcidev, index), - &phba->sli4_hba.hba_eq_hdl[index]); + eqhdl = lpfc_get_eq_hdl(index); + lpfc_irq_clear_aff(eqhdl); + irq_set_affinity_hint(eqhdl->irq, NULL); + free_irq(eqhdl->irq, eqhdl); } } else { free_irq(phba->pcidev->irq, phba); @@ -11280,11 +11714,10 @@ static void lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) { struct lpfc_sli4_hdw_queue *qp; - int idx, ccnt, fcnt; + int idx, ccnt; int wait_time = 0; int io_xri_cmpl = 1; int nvmet_xri_cmpl = 1; - int fcp_xri_cmpl = 1; int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list); /* Driver just aborted IOs during the hba_unset process. 
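The MSI fallback just above now allocates its single vector through pci_alloc_irq_vectors() as well. A stand-alone sketch of that single-vector path, with a stub handler and a placeholder name string:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t stub_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int enable_single_msi(struct pci_dev *pdev, void *drvdata)
{
	int rc;

	rc = pci_alloc_irq_vectors(pdev, 1, 1,
				   PCI_IRQ_MSI | PCI_IRQ_AFFINITY);
	if (rc < 0)
		return rc;

	rc = request_irq(pci_irq_vector(pdev, 0), stub_handler, 0,
			 "example-msi", drvdata);
	if (rc)
		pci_free_irq_vectors(pdev);	/* undo on failure */
	return rc;
}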
Pause @@ -11298,32 +11731,21 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) lpfc_nvme_wait_for_io_drain(phba); ccnt = 0; - fcnt = 0; for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; - fcp_xri_cmpl = list_empty( - &qp->lpfc_abts_scsi_buf_list); - if (!fcp_xri_cmpl) /* if list is NOT empty */ - fcnt++; - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - io_xri_cmpl = list_empty( - &qp->lpfc_abts_nvme_buf_list); - if (!io_xri_cmpl) /* if list is NOT empty */ - ccnt++; - } + io_xri_cmpl = list_empty(&qp->lpfc_abts_io_buf_list); + if (!io_xri_cmpl) /* if list is NOT empty */ + ccnt++; } if (ccnt) io_xri_cmpl = 0; - if (fcnt) - fcp_xri_cmpl = 0; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { nvmet_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list); } - while (!fcp_xri_cmpl || !els_xri_cmpl || !io_xri_cmpl || - !nvmet_xri_cmpl) { + while (!els_xri_cmpl || !io_xri_cmpl || !nvmet_xri_cmpl) { if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) { if (!nvmet_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -11332,12 +11754,7 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) wait_time/1000); if (!io_xri_cmpl) lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "6100 NVME XRI exchange busy " - "wait time: %d seconds.\n", - wait_time/1000); - if (!fcp_xri_cmpl) - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2877 FCP XRI exchange busy " + "6100 IO XRI exchange busy " "wait time: %d seconds.\n", wait_time/1000); if (!els_xri_cmpl) @@ -11353,24 +11770,15 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba) } ccnt = 0; - fcnt = 0; for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; - fcp_xri_cmpl = list_empty( - &qp->lpfc_abts_scsi_buf_list); - if (!fcp_xri_cmpl) /* if list is NOT empty */ - fcnt++; - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - io_xri_cmpl = list_empty( - &qp->lpfc_abts_nvme_buf_list); - if (!io_xri_cmpl) /* if list is NOT empty */ - ccnt++; - } + io_xri_cmpl = list_empty( + &qp->lpfc_abts_io_buf_list); + if (!io_xri_cmpl) /* if list is NOT empty */ + ccnt++; } if (ccnt) io_xri_cmpl = 0; - if (fcnt) - fcp_xri_cmpl = 0; if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { nvmet_xri_cmpl = list_empty( @@ -11435,6 +11843,9 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) /* Wait for completion of device XRI exchange busy */ lpfc_sli4_xri_exchange_busy_wait(phba); + /* per-phba callback de-registration for hotplug event */ + lpfc_cpuhp_remove(phba); + /* Disable PCI subsystem interrupt */ lpfc_sli4_disable_intr(phba); @@ -11606,6 +12017,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) sli4_params->cqav = bf_get(cfg_cqav, mbx_sli4_parameters); sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters); sli4_params->bv1s = bf_get(cfg_bv1s, mbx_sli4_parameters); + sli4_params->pls = bf_get(cfg_pvl, mbx_sli4_parameters); sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt, mbx_sli4_parameters); sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters); @@ -11614,6 +12026,9 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters); phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters); + /* Check for Extended Pre-Registered SGL support */ + phba->cfg_xpsgl = bf_get(cfg_xpsgl, mbx_sli4_parameters); + /* Check for firmware nvme support */ rc = (bf_get(cfg_nvme, mbx_sli4_parameters) && bf_get(cfg_xib, mbx_sli4_parameters)); @@ -11644,6 +12059,7 @@ fcponly: 
phba->nvme_support = 0; phba->nvmet_support = 0; phba->cfg_nvmet_mrq = 0; + phba->cfg_nvme_seg_cnt = 0; /* If no FC4 type support, move to just SCSI support */ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)) @@ -11652,6 +12068,12 @@ fcponly: } } + /* If the NVME FC4 type is enabled, scale the sg_seg_cnt to + * accommodate 512K and 1M IOs in a single nvme buf. + */ + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) + phba->cfg_sg_seg_cnt = LPFC_MAX_NVME_SEG_CNT; + /* Only embed PBDE for if_type 6, PBDE support requires xib be set */ if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_6) || (!bf_get(cfg_xib, mbx_sli4_parameters))) @@ -11716,6 +12138,14 @@ fcponly: else phba->mds_diags_support = 0; + /* + * Check if the SLI port supports NSLER + */ + if (bf_get(cfg_nsler, mbx_sli4_parameters)) + phba->nsler = 1; + else + phba->nsler = 0; + return 0; } @@ -12144,7 +12574,7 @@ lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba) lpfc_scsi_dev_block(phba); /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); + lpfc_sli_flush_io_rings(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -12174,7 +12604,7 @@ lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba) lpfc_stop_hba_timers(phba); /* Clean up all driver's outstanding SCSI I/Os */ - lpfc_sli_flush_fcp_rings(phba); + lpfc_sli_flush_io_rings(phba); } /** @@ -12359,35 +12789,57 @@ lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba) } -static void +static int lpfc_log_write_firmware_error(struct lpfc_hba *phba, uint32_t offset, uint32_t magic_number, uint32_t ftype, uint32_t fid, uint32_t fsize, const struct firmware *fw) { - if ((offset == ADD_STATUS_FW_NOT_SUPPORTED) || + int rc; + + /* Three cases: (1) FW was not supported on the detected adapter. + * (2) FW update has been locked out administratively. + * (3) Some other error during FW update. + * In each case, an unmaskable message is written to the console + * for admin diagnosis. + */ + if (offset == ADD_STATUS_FW_NOT_SUPPORTED || (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC && - magic_number != MAGIC_NUMER_G6) || + magic_number != MAGIC_NUMBER_G6) || (phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC && - magic_number != MAGIC_NUMER_G7)) + magic_number != MAGIC_NUMBER_G7)) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3030 This firmware version is not supported on " - "this HBA model. Device:%x Magic:%x Type:%x " - "ID:%x Size %d %zd\n", - phba->pcidev->device, magic_number, ftype, fid, - fsize, fw->size); - else + "3030 This firmware version is not supported on" + " this HBA model. Device:%x Magic:%x Type:%x " + "ID:%x Size %d %zd\n", + phba->pcidev->device, magic_number, ftype, fid, + fsize, fw->size); + rc = -EINVAL; + } else if (offset == ADD_STATUS_FW_DOWNLOAD_HW_DISABLED) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3021 Firmware downloads have been prohibited " + "by a system configuration setting on " + "Device:%x Magic:%x Type:%x ID:%x Size %d " + "%zd\n", + phba->pcidev->device, magic_number, ftype, fid, + fsize, fw->size); + rc = -EACCES; + } else { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3022 FW Download failed. Device:%x Magic:%x Type:%x " - "ID:%x Size %d %zd\n", - phba->pcidev->device, magic_number, ftype, fid, - fsize, fw->size); + "3022 FW Download failed. 
Add Status x%x " + "Device:%x Magic:%x Type:%x ID:%x Size %d " + "%zd\n", + offset, phba->pcidev->device, magic_number, + ftype, fid, fsize, fw->size); + rc = -EIO; + } + return rc; } - /** * lpfc_write_firmware - attempt to write a firmware image to the port * @fw: pointer to firmware image returned from request_firmware. - * @phba: pointer to lpfc hba data structure. + * @context: pointer to firmware image returned from request_firmware. + * @ret: return value this routine provides to the caller. * **/ static void @@ -12456,8 +12908,12 @@ lpfc_write_firmware(const struct firmware *fw, void *context) rc = lpfc_wr_object(phba, &dma_buffer_list, (fw->size - offset), &offset); if (rc) { - lpfc_log_write_firmware_error(phba, offset, - magic_number, ftype, fid, fsize, fw); + rc = lpfc_log_write_firmware_error(phba, offset, + magic_number, + ftype, + fid, + fsize, + fw); goto release_out; } } @@ -12477,9 +12933,12 @@ release_out: } release_firmware(fw); out: - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "3024 Firmware update done: %d.\n", rc); - return; + if (rc < 0) + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3062 Firmware update error, status %d.\n", rc); + else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3024 Firmware update success: size %d.\n", rc); } /** @@ -12598,6 +13057,12 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) phba->pport = NULL; lpfc_stop_port(phba); + /* Init cpu_map array */ + lpfc_cpu_map_array_init(phba); + + /* Init hba_eq_hdl array */ + lpfc_hba_eq_hdl_array_init(phba); + /* Configure and enable interrupt */ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode); if (intr_mode == LPFC_INTR_ERROR) { @@ -12679,6 +13144,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Enable RAS FW log support */ lpfc_sli4_ras_setup(phba); + INIT_LIST_HEAD(&phba->poll_list); + cpuhp_state_add_instance_nocalls(lpfc_cpuhp_state, &phba->cpuhp); + return 0; out_free_sysfs_attr: @@ -12946,12 +13414,8 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) /* Block all SCSI devices' I/Os on the host */ lpfc_scsi_dev_block(phba); - /* Flush all driver's outstanding SCSI I/Os as we are to reset */ - lpfc_sli_flush_fcp_rings(phba); - - /* Flush the outstanding NVME IOs if fc4 type enabled. */ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) - lpfc_sli_flush_nvme_rings(phba); + /* Flush all driver's outstanding I/Os as we are to reset */ + lpfc_sli_flush_io_rings(phba); /* stop all timers */ lpfc_stop_hba_timers(phba); @@ -12982,12 +13446,8 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba) /* stop all timers */ lpfc_stop_hba_timers(phba); - /* Clean up all driver's outstanding SCSI I/Os */ - lpfc_sli_flush_fcp_rings(phba); - - /* Flush the outstanding NVME IOs if fc4 type enabled. 
*/ - if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) - lpfc_sli_flush_nvme_rings(phba); + /* Clean up all driver's outstanding I/Os */ + lpfc_sli_flush_io_rings(phba); } /** @@ -13399,8 +13859,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba) phba->cfg_fof = 1; } else { phba->cfg_fof = 0; - if (phba->device_data_mem_pool) - mempool_destroy(phba->device_data_mem_pool); + mempool_destroy(phba->device_data_mem_pool); phba->device_data_mem_pool = NULL; } @@ -13505,11 +13964,24 @@ lpfc_init(void) /* Initialize in case vector mapping is needed */ lpfc_present_cpu = num_present_cpus(); + error = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, + "lpfc/sli4:online", + lpfc_cpu_online, lpfc_cpu_offline); + if (error < 0) + goto cpuhp_failure; + lpfc_cpuhp_state = error; + error = pci_register_driver(&lpfc_driver); - if (error) { - fc_release_transport(lpfc_transport_template); - fc_release_transport(lpfc_vport_transport_template); - } + if (error) + goto unwind; + + return error; + +unwind: + cpuhp_remove_multi_state(lpfc_cpuhp_state); +cpuhp_failure: + fc_release_transport(lpfc_transport_template); + fc_release_transport(lpfc_vport_transport_template); return error; } @@ -13526,21 +13998,9 @@ lpfc_exit(void) { misc_deregister(&lpfc_mgmt_dev); pci_unregister_driver(&lpfc_driver); + cpuhp_remove_multi_state(lpfc_cpuhp_state); fc_release_transport(lpfc_transport_template); fc_release_transport(lpfc_vport_transport_template); - if (_dump_buf_data) { - printk(KERN_ERR "9062 BLKGRD: freeing %lu pages for " - "_dump_buf_data at 0x%p\n", - (1L << _dump_buf_data_order), _dump_buf_data); - free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order); - } - - if (_dump_buf_dif) { - printk(KERN_ERR "9049 BLKGRD: freeing %lu pages for " - "_dump_buf_dif at 0x%p\n", - (1L << _dump_buf_dif_order), _dump_buf_dif); - free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order); - } idr_destroy(&lpfc_hba_index); } diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index ea10f03437f5..148d02a27b58 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -46,6 +46,23 @@ #define LOG_NVME_IOERR 0x00800000 /* NVME IO Error events. */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages */ +/* generate message by verbose log setting or severity */ +#define lpfc_vlog_msg(vport, level, mask, fmt, arg...) \ +{ if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '4')) \ + dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \ + fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } + +#define lpfc_log_msg(phba, level, mask, fmt, arg...) \ +do { \ + { uint32_t log_verbose = (phba)->pport ? \ + (phba)->pport->cfg_log_verbose : \ + (phba)->cfg_log_verbose; \ + if (((mask) & log_verbose) || (level[1] <= '4')) \ + dev_printk(level, &((phba)->pcidev)->dev, "%d:" \ + fmt, phba->brd_no, ##arg); \ + } \ +} while (0) + #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) 
\ do { \ { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 8abe933bad09..d1773c01d2b3 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -515,6 +515,7 @@ lpfc_init_link(struct lpfc_hba * phba, if ((phba->pcidev->device == PCI_DEVICE_ID_LANCER_G6_FC || phba->pcidev->device == PCI_DEVICE_ID_LANCER_G7_FC) && + !(phba->sli4_hba.pc_sli4_params.pls) && mb->un.varInitLnk.link_flags & FLAGS_TOPOLOGY_MODE_LOOP) { mb->un.varInitLnk.link_flags = FLAGS_TOPOLOGY_MODE_PT_PT; phba->cfg_topology = FLAGS_TOPOLOGY_MODE_PT_PT; diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 66191fa35f63..7082279e4c01 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c @@ -72,8 +72,8 @@ lpfc_mem_alloc_active_rrq_pool_s4(struct lpfc_hba *phba) { * lpfc_mem_alloc - create and allocate all PCI and memory pools * @phba: HBA to allocate pools for * - * Description: Creates and allocates PCI pools lpfc_sg_dma_buf_pool, - * lpfc_mbuf_pool, lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools + * Description: Creates and allocates PCI pools lpfc_mbuf_pool, + * lpfc_hrb_pool. Creates and allocates kmalloc-backed mempools * for LPFC_MBOXQ_t and lpfc_nodelist. Also allocates the VPI bitmask. * * Notes: Not interrupt-safe. Must be called with no locks held. If any @@ -89,36 +89,12 @@ lpfc_mem_alloc(struct lpfc_hba *phba, int align) struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool; int i; - if (phba->sli_rev == LPFC_SLI_REV4) { - /* Calculate alignment */ - if (phba->cfg_sg_dma_buf_size < SLI4_PAGE_SIZE) - i = phba->cfg_sg_dma_buf_size; - else - i = SLI4_PAGE_SIZE; - - phba->lpfc_sg_dma_buf_pool = - dma_pool_create("lpfc_sg_dma_buf_pool", - &phba->pcidev->dev, - phba->cfg_sg_dma_buf_size, - i, 0); - if (!phba->lpfc_sg_dma_buf_pool) - goto fail; - - } else { - phba->lpfc_sg_dma_buf_pool = - dma_pool_create("lpfc_sg_dma_buf_pool", - &phba->pcidev->dev, phba->cfg_sg_dma_buf_size, - align, 0); - - if (!phba->lpfc_sg_dma_buf_pool) - goto fail; - } phba->lpfc_mbuf_pool = dma_pool_create("lpfc_mbuf_pool", &phba->pcidev->dev, LPFC_BPL_SIZE, align, 0); if (!phba->lpfc_mbuf_pool) - goto fail_free_dma_buf_pool; + goto fail; pool->elements = kmalloc_array(LPFC_MBUF_POOL_SIZE, sizeof(struct lpfc_dmabuf), @@ -208,9 +184,6 @@ fail_free_drb_pool: fail_free_lpfc_mbuf_pool: dma_pool_destroy(phba->lpfc_mbuf_pool); phba->lpfc_mbuf_pool = NULL; - fail_free_dma_buf_pool: - dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); - phba->lpfc_sg_dma_buf_pool = NULL; fail: return -ENOMEM; } @@ -248,25 +221,19 @@ lpfc_mem_free(struct lpfc_hba *phba) /* Free HBQ pools */ lpfc_sli_hbqbuf_free_all(phba); - if (phba->lpfc_nvmet_drb_pool) - dma_pool_destroy(phba->lpfc_nvmet_drb_pool); + dma_pool_destroy(phba->lpfc_nvmet_drb_pool); phba->lpfc_nvmet_drb_pool = NULL; - if (phba->lpfc_drb_pool) - dma_pool_destroy(phba->lpfc_drb_pool); + + dma_pool_destroy(phba->lpfc_drb_pool); phba->lpfc_drb_pool = NULL; - if (phba->lpfc_hrb_pool) - dma_pool_destroy(phba->lpfc_hrb_pool); + + dma_pool_destroy(phba->lpfc_hrb_pool); phba->lpfc_hrb_pool = NULL; - if (phba->txrdy_payload_pool) - dma_pool_destroy(phba->txrdy_payload_pool); - phba->txrdy_payload_pool = NULL; - if (phba->lpfc_hbq_pool) - dma_pool_destroy(phba->lpfc_hbq_pool); + dma_pool_destroy(phba->lpfc_hbq_pool); phba->lpfc_hbq_pool = NULL; - if (phba->rrq_pool) - mempool_destroy(phba->rrq_pool); + mempool_destroy(phba->rrq_pool); 
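/* Background for the guard removals in this hunk: dma_pool_destroy()
 * and mempool_destroy() are NULL-safe no-ops, so the removed
 * "if (pool)" checks were redundant. A minimal sketch of the
 * resulting teardown idiom (illustrative only, not an extra hunk):
 *
 *	dma_pool_destroy(phba->lpfc_hbq_pool);	/- no-op when pool is NULL
 *	phba->lpfc_hbq_pool = NULL;		/- avoid a double destroy later
 */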
phba->rrq_pool = NULL; /* Free NLP memory pool */ @@ -290,10 +257,6 @@ lpfc_mem_free(struct lpfc_hba *phba) dma_pool_destroy(phba->lpfc_mbuf_pool); phba->lpfc_mbuf_pool = NULL; - /* Free DMA buffer memory pool */ - dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); - phba->lpfc_sg_dma_buf_pool = NULL; - /* Free Device Data memory pool */ if (phba->device_data_mem_pool) { /* Ensure all objects have been returned to the pool */ @@ -366,6 +329,13 @@ lpfc_mem_free_all(struct lpfc_hba *phba) /* Free and destroy all the allocated memory pools */ lpfc_mem_free(phba); + /* Free DMA buffer memory pool */ + dma_pool_destroy(phba->lpfc_sg_dma_buf_pool); + phba->lpfc_sg_dma_buf_pool = NULL; + + dma_pool_destroy(phba->lpfc_cmd_rsp_buf_pool); + phba->lpfc_cmd_rsp_buf_pool = NULL; + /* Free the iocb lookup array */ kfree(psli->iocbq_lookup); psli->iocbq_lookup = NULL; diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 59252bfca14e..a024e5a3918f 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -279,6 +279,109 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) lpfc_cancel_retry_delay_tmo(phba->pport, ndlp); } +/* lpfc_defer_pt2pt_acc - Complete SLI3 pt2pt processing on link up + * @phba: pointer to lpfc hba data structure. + * @link_mbox: pointer to CONFIG_LINK mailbox object + * + * This routine is only called if we are SLI3, direct connect pt2pt + * mode and the remote NPort issues the PLOGI after link up. + */ +static void +lpfc_defer_pt2pt_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *link_mbox) +{ + LPFC_MBOXQ_t *login_mbox; + MAILBOX_t *mb = &link_mbox->u.mb; + struct lpfc_iocbq *save_iocb; + struct lpfc_nodelist *ndlp; + int rc; + + ndlp = link_mbox->ctx_ndlp; + login_mbox = link_mbox->context3; + save_iocb = login_mbox->context3; + link_mbox->context3 = NULL; + login_mbox->context3 = NULL; + + /* Check for CONFIG_LINK error */ + if (mb->mbxStatus) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "4575 CONFIG_LINK fails pt2pt discovery: %x\n", + mb->mbxStatus); + mempool_free(login_mbox, phba->mbox_mem_pool); + mempool_free(link_mbox, phba->mbox_mem_pool); + kfree(save_iocb); + return; + } + + /* Now that CONFIG_LINK completed, and our SID is configured, + * we can now proceed with sending the PLOGI ACC. + */ + rc = lpfc_els_rsp_acc(link_mbox->vport, ELS_CMD_PLOGI, + save_iocb, ndlp, login_mbox); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "4576 PLOGI ACC fails pt2pt discovery: %x\n", + rc); + mempool_free(login_mbox, phba->mbox_mem_pool); + } + + mempool_free(link_mbox, phba->mbox_mem_pool); + kfree(save_iocb); +} + +/** + * lpfc_defer_acc_rsp - Progress SLI4 target rcv PLOGI handler + * @phba: Pointer to HBA context object. + * @pmb: Pointer to mailbox object. + * + * This function provides the unreg rpi mailbox completion handler for a tgt. + * The routine frees the memory resources associated with the completed + * mailbox command and transmits the ELS ACC. + * + * This routine is only called if we are SLI4, acting in target + * mode and the remote NPort issues the PLOGI after link up.
+ **/ +static void +lpfc_defer_acc_rsp(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) +{ + struct lpfc_vport *vport = pmb->vport; + struct lpfc_nodelist *ndlp = pmb->ctx_ndlp; + LPFC_MBOXQ_t *mbox = pmb->context3; + struct lpfc_iocbq *piocb = NULL; + int rc; + + if (mbox) { + pmb->context3 = NULL; + piocb = mbox->context3; + mbox->context3 = NULL; + } + + /* + * Complete the unreg rpi mbx request, and update flags. + * This will also restart any deferred events. + */ + lpfc_nlp_get(ndlp); + lpfc_sli4_unreg_rpi_cmpl_clr(phba, pmb); + + if (!piocb) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, + "4578 PLOGI ACC fail\n"); + if (mbox) + mempool_free(mbox, phba->mbox_mem_pool); + goto out; + } + + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, piocb, ndlp, mbox); + if (rc) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY | LOG_ELS, + "4579 PLOGI ACC fail %x\n", rc); + if (mbox) + mempool_free(mbox, phba->mbox_mem_pool); + } + kfree(piocb); +out: + lpfc_nlp_put(ndlp); +} + static int lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb) @@ -291,10 +394,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, IOCB_t *icmd; struct serv_parm *sp; uint32_t ed_tov; - LPFC_MBOXQ_t *mbox; + LPFC_MBOXQ_t *link_mbox; + LPFC_MBOXQ_t *login_mbox; + struct lpfc_iocbq *save_iocb; struct ls_rjt stat; uint32_t vid, flag; - int rc; + u16 rpi; + int rc, defer_acc; memset(&stat, 0, sizeof (struct ls_rjt)); pcmd = (struct lpfc_dmabuf *) cmdiocb->context2; @@ -343,6 +449,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, else ndlp->nlp_fcp_info |= CLASS3; + defer_acc = 0; ndlp->nlp_class_sup = 0; if (sp->cls1.classValid) ndlp->nlp_class_sup |= FC_COS_CLASS1; @@ -354,7 +461,6 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_class_sup |= FC_COS_CLASS4; ndlp->nlp_maxframe = ((sp->cmn.bbRcvSizeMsb & 0x0F) << 8) | sp->cmn.bbRcvSizeLsb; - /* if already logged in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: @@ -396,6 +502,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_flag &= ~NLP_FIRSTBURST; + login_mbox = NULL; + link_mbox = NULL; + save_iocb = NULL; + /* Check for Nport to NPort pt2pt protocol */ if ((vport->fc_flag & FC_PT2PT) && !(vport->fc_flag & FC_PT2PT_PLOGI)) { @@ -423,17 +533,22 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (phba->sli_rev == LPFC_SLI_REV4) lpfc_issue_reg_vfi(vport); else { - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (mbox == NULL) + defer_acc = 1; + link_mbox = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!link_mbox) goto out; - lpfc_config_link(phba, mbox); - mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; - mbox->vport = vport; - rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mempool_free(mbox, phba->mbox_mem_pool); + lpfc_config_link(phba, link_mbox); + link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc; + link_mbox->vport = vport; + link_mbox->ctx_ndlp = ndlp; + + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); + if (!save_iocb) goto out; - } + /* Save info from cmd IOCB used in rsp */ + memcpy((uint8_t *)save_iocb, (uint8_t *)cmdiocb, + sizeof(struct lpfc_iocbq)); } lpfc_can_disctmo(vport); @@ -448,30 +563,57 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_flag |= NLP_SUPPRESS_RSP; } - mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); 
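/* How the deferred-ACC state travels: the two completion handlers
 * added above recover everything they need from a context chain that
 * lpfc_rcv_plogi() assembles in its defer_acc block further down.
 * Schematically (names from this patch, error paths elided):
 *
 *	link_mbox->context3  = login_mbox;   // REG_LOGIN mbox, issued last
 *	login_mbox->context3 = save_iocb;    // copy of the received PLOGI
 *	link_mbox->mbox_cmpl = lpfc_defer_pt2pt_acc; // or lpfc_defer_acc_rsp
 *	lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT);
 *
 * so the PLOGI ACC is sent only after CONFIG_LINK (SLI3 pt2pt) or
 * UNREG_RPI (SLI4 target) has completed.
 */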
- if (!mbox) + login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!login_mbox) goto out; /* Registering an existing RPI behaves differently for SLI3 vs SLI4 */ - if (phba->sli_rev == LPFC_SLI_REV4) + if (phba->nvmet_support && !defer_acc) { + link_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!link_mbox) + goto out; + + /* As unique identifiers such as iotag would be overwritten + * with those from the cmdiocb, allocate separate temporary + * storage for the copy. + */ + save_iocb = kzalloc(sizeof(*save_iocb), GFP_KERNEL); + if (!save_iocb) + goto out; + + /* Unreg RPI is required for SLI4. */ + rpi = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; + lpfc_unreg_login(phba, vport->vpi, rpi, link_mbox); + link_mbox->vport = vport; + link_mbox->ctx_ndlp = ndlp; + link_mbox->mbox_cmpl = lpfc_defer_acc_rsp; + + if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && + (!(vport->fc_flag & FC_OFFLINE_MODE))) + ndlp->nlp_flag |= NLP_UNREG_INP; + + /* Save info from cmd IOCB used in rsp */ + memcpy(save_iocb, cmdiocb, sizeof(*save_iocb)); + + /* Delay sending ACC till unreg RPI completes. */ + defer_acc = 1; + } else if (phba->sli_rev == LPFC_SLI_REV4) lpfc_unreg_rpi(vport, ndlp); rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID, - (uint8_t *) sp, mbox, ndlp->nlp_rpi); - if (rc) { - mempool_free(mbox, phba->mbox_mem_pool); + (uint8_t *)sp, login_mbox, ndlp->nlp_rpi); + if (rc) goto out; - } /* ACC PLOGI rsp command needs to execute first, - * queue this mbox command to be processed later. + * queue this login_mbox command to be processed later. */ - mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; + login_mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; /* - * mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox + * login_mbox->ctx_ndlp = lpfc_nlp_get(ndlp) deferred until mailbox * command issued in lpfc_cmpl_els_acc(). */ - mbox->vport = vport; + login_mbox->vport = vport; spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); spin_unlock_irq(shost->host_lock); @@ -484,8 +626,10 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * single discovery thread, this will cause a huge delay in * discovery. Also this will cause multiple state machines * running in parallel for this node. + * This only applies to a fabric environment. */ - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) { + if ((ndlp->nlp_state == NLP_STE_PLOGI_ISSUE) && + (vport->fc_flag & FC_FABRIC)) { /* software abort outstanding PLOGI */ lpfc_els_abort(phba, ndlp); } @@ -493,6 +637,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if ((vport->port_type == LPFC_NPIV_PORT && vport->cfg_restrict_login)) { + /* no deferred ACC */ + kfree(save_iocb); + /* In order to preserve RPIs, we want to cleanup * the default RPI the firmware created to rcv * this ELS request. 
The only way to do this is @@ -504,16 +651,50 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; rc = lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, - ndlp, mbox); + ndlp, login_mbox); if (rc) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(login_mbox, phba->mbox_mem_pool); + return 1; + } + if (defer_acc) { + /* So the order here should be: + * SLI3 pt2pt + * Issue CONFIG_LINK mbox + * CONFIG_LINK cmpl + * SLI4 tgt + * Issue UNREG RPI mbx + * UNREG RPI cmpl + * Issue PLOGI ACC + * PLOGI ACC cmpl + * Issue REG_LOGIN mbox + */ + + /* Save the REG_LOGIN mbox for and rcv IOCB copy later */ + link_mbox->context3 = login_mbox; + login_mbox->context3 = save_iocb; + + /* Start the ball rolling by issuing CONFIG_LINK here */ + rc = lpfc_sli_issue_mbox(phba, link_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) + goto out; return 1; } - rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, mbox); + + rc = lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, login_mbox); if (rc) - mempool_free(mbox, phba->mbox_mem_pool); + mempool_free(login_mbox, phba->mbox_mem_pool); return 1; out: + if (defer_acc) + lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY, + "4577 discovery failure: %p %p %p\n", + save_iocb, link_mbox, login_mbox); + kfree(save_iocb); + if (link_mbox) + mempool_free(link_mbox, phba->mbox_mem_pool); + if (login_mbox) + mempool_free(login_mbox, phba->mbox_mem_pool); + stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC; stat.un.b.lsRjtRsnCodeExp = LSEXP_OUT_OF_RESOURCE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); @@ -614,7 +795,7 @@ lpfc_rcv_padisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } out: /* If we are authenticated, move to the proper state */ - if (ndlp->nlp_type & NLP_FCP_TARGET) + if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); else lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -799,9 +980,15 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->writeXferRdyDis) ndlp->nlp_flag |= NLP_FIRSTBURST; } - if (npr->Retry) + if (npr->Retry && ndlp->nlp_type & + (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; + if (npr->Retry && phba->nsler && + ndlp->nlp_type & (NLP_NVME_INITIATOR | NLP_NVME_TARGET)) + ndlp->nlp_nvme_info |= NLP_NVME_NSLER; + + /* If this driver is in nvme target mode, set the ndlp's fc4 * type to NVME provided the PRLI response claims NVME FC4 * type. 
Target mode does not issue gft_id so doesn't get @@ -845,9 +1032,9 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) if (!(vport->fc_flag & FC_PT2PT)) { /* Check config parameter use-adisc or FCP-2 */ - if ((vport->cfg_use_adisc && (vport->fc_flag & FC_RSCN_MODE)) || + if (vport->cfg_use_adisc && ((vport->fc_flag & FC_RSCN_MODE) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && - (ndlp->nlp_type & NLP_FCP_TARGET))) { + (ndlp->nlp_type & NLP_FCP_TARGET)))) { spin_lock_irq(shost->host_lock); ndlp->nlp_flag |= NLP_NPR_ADISC; spin_unlock_irq(shost->host_lock); @@ -885,7 +1072,7 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1435 release_rpi SKIP UNREG x%x on " "NPort x%x deferred x%x flg x%x " - "Data: %p\n", + "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); @@ -1661,6 +1848,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, LPFC_MBOXQ_t *mb; LPFC_MBOXQ_t *nextmb; struct lpfc_dmabuf *mp; + struct lpfc_nodelist *ns_ndlp; cmdiocb = (struct lpfc_iocbq *) arg; @@ -1693,6 +1881,13 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, } spin_unlock_irq(&phba->hbalock); + /* software abort if any GID_FT is outstanding */ + if (vport->cfg_enable_fc4_type != LPFC_ENABLE_FCP) { + ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); + if (ns_ndlp && NLP_CHK_NODE_ACT(ns_ndlp)) + lpfc_els_abort(phba, ns_ndlp); + } + lpfc_rcv_logo(vport, ndlp, cmdiocb, ELS_CMD_LOGO); return ndlp->nlp_state; } @@ -1814,7 +2009,11 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE); - lpfc_issue_els_prli(vport, ndlp, 0); + if (lpfc_issue_els_prli(vport, ndlp, 0)) { + lpfc_issue_els_logo(vport, ndlp, 0); + ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; + lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + } } else { if ((vport->fc_flag & FC_PT2PT) && phba->nvmet_support) phba->targetport->port_id = vport->fc_myDID; @@ -2012,6 +2211,13 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (bf_get_be32(prli_init, nvpr)) ndlp->nlp_type |= NLP_NVME_INITIATOR; + if (phba->nsler && bf_get_be32(prli_nsler, nvpr) && + bf_get_be32(prli_conf, nvpr)) + + ndlp->nlp_nvme_info |= NLP_NVME_NSLER; + else + ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; + /* Target driver cannot solicit NVME FB. */ if (bf_get_be32(prli_tgt, nvpr)) { /* Complete the nvme target roles. 
The transport @@ -2891,18 +3097,21 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint32_t(*func) (struct lpfc_vport *, struct lpfc_nodelist *, void *, uint32_t); uint32_t got_ndlp = 0; + uint32_t data1; if (lpfc_nlp_get(ndlp)) got_ndlp = 1; cur_state = ndlp->nlp_state; + data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) | + ((uint32_t)ndlp->nlp_type)); /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " "state %d rpi x%x Data: x%x x%x\n", evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, - ndlp->nlp_flag, ndlp->nlp_fc4_type); + ndlp->nlp_flag, data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM in: evt:%d ste:%d did:x%x", @@ -2913,10 +3122,13 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* DSM out state <rc> on NPort <nlp_DID> */ if (got_ndlp) { + data1 = (((uint32_t)ndlp->nlp_fc4_type << 16) | + ((uint32_t)ndlp->nlp_type)); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x " - "rpi x%x Data: x%x\n", - rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag); + "rpi x%x Data: x%x x%x\n", + rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, + data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, "DSM out: ste:%d did:x%x flg:x%x", diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 946642cee3df..f6c8963c915d 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -196,6 +196,46 @@ lpfc_nvme_cmd_template(void) } /** + * lpfc_nvme_prep_abort_wqe - set up 'abort' work queue entry. + * @pwqeq: Pointer to command iocb. + * @xritag: Tag that uniquely identifies the local exchange resource. + * @opt: Option bits - + * bit 0 = inhibit sending abts on the link + * + * This function is called with hbalock held. + **/ +void +lpfc_nvme_prep_abort_wqe(struct lpfc_iocbq *pwqeq, u16 xritag, u8 opt) +{ + union lpfc_wqe128 *wqe = &pwqeq->wqe; + + /* WQEs are reused. Clear stale data and set key fields to + * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. + */ + memset(wqe, 0, sizeof(*wqe)); + + if (opt & INHIBIT_ABORT) + bf_set(abort_cmd_ia, &wqe->abort_cmd, 1); + /* Abort specified xri tag, with the mask deliberately zeroed */ + bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG); + + bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); + + /* Abort the IO associated with this outstanding exchange ID. */ + wqe->abort_cmd.wqe_com.abort_tag = xritag; + + /* iotag for the wqe completion. */ + bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, pwqeq->iotag); + + bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); + + bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND); + bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1); + bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); +} + +/** * lpfc_nvme_create_queue - * @lpfc_pnvme: Pointer to the driver's nvme instance data * @qidx: A cpu index used to affinitize IO queues and MSIX vectors.
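/* Call-site sketch for the lpfc_nvme_prep_abort_wqe() helper added
 * above (this is exactly how the initiator abort path later in this
 * patch consumes it; the nvmet target path passes INHIBIT_ABORT
 * instead of 0 when an ABTS was already received from the wire):
 *
 *	nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED;
 *	lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0);
 *
 * after which the abort WQE is issued on the same hardware queue as
 * the command being aborted.
 */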
@@ -247,7 +287,7 @@ lpfc_nvme_create_queue(struct nvme_fc_local_port *pnvme_lport, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, "6073 Binding %s HdwQueue %d (cpu %d) to " - "hdw_queue %d qhandle %p\n", str, + "hdw_queue %d qhandle x%px\n", str, qidx, qhandle->cpu_id, qhandle->index, qhandle); *handle = (void *)qhandle; return 0; @@ -282,7 +322,7 @@ lpfc_nvme_delete_queue(struct nvme_fc_local_port *pnvme_lport, vport = lport->vport; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, - "6001 ENTER. lpfc_pnvme %p, qidx x%x qhandle %p\n", + "6001 ENTER. lpfc_pnvme x%px, qidx x%x qhandle x%px\n", lport, qidx, handle); kfree(handle); } @@ -293,7 +333,7 @@ lpfc_nvme_localport_delete(struct nvme_fc_local_port *localport) struct lpfc_nvme_lport *lport = localport->private; lpfc_printf_vlog(lport->vport, KERN_INFO, LOG_NVME, - "6173 localport %p delete complete\n", + "6173 localport x%px delete complete\n", lport); /* release any threads waiting for the unreg to complete */ @@ -332,7 +372,7 @@ lpfc_nvme_remoteport_delete(struct nvme_fc_remote_port *remoteport) * calling state machine to remove the node. */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, - "6146 remoteport delete of remoteport %p\n", + "6146 remoteport delete of remoteport x%px\n", remoteport); spin_lock_irq(&vport->phba->hbalock); @@ -383,8 +423,8 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, ndlp = (struct lpfc_nodelist *)cmdwqe->context1; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6047 nvme cmpl Enter " - "Data %p DID %x Xri: %x status %x reason x%x cmd:%p " - "lsreg:%p bmp:%p ndlp:%p\n", + "Data %px DID %x Xri: %x status %x reason x%x " + "cmd:x%px lsreg:x%px bmp:x%px ndlp:x%px\n", pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, cmdwqe->sli4_xritag, status, (wcqe->parameter & 0xffff), @@ -404,7 +444,7 @@ lpfc_nvme_cmpl_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, else lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, "6046 nvme cmpl without done call back? " - "Data %p DID %x Xri: %x status %x\n", + "Data %px DID %x Xri: %x status %x\n", pnvme_lsreq, ndlp ? ndlp->nlp_DID : 0, cmdwqe->sli4_xritag, status); if (ndlp) { @@ -436,6 +476,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, return 1; wqe = &genwqe->wqe; + /* Initialize only 64 bytes */ memset(wqe, 0, sizeof(union lpfc_wqe)); genwqe->context3 = (uint8_t *)bmp; @@ -516,7 +557,8 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp, /* Issue GEN REQ WQE for NPORT <did> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "6050 Issue GEN REQ WQE to NPORT x%x " - "Data: x%x x%x wq:%p lsreq:%p bmp:%p xmit:%d 1st:%d\n", + "Data: x%x x%x wq:x%px lsreq:x%px bmp:x%px " + "xmit:%d 1st:%d\n", ndlp->nlp_DID, genwqe->iotag, vport->port_state, genwqe, pnvme_lsreq, bmp, xmit_len, first_len); @@ -594,7 +636,7 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, ndlp = rport->ndlp; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, - "6051 Remoteport %p, rport has invalid ndlp. " + "6051 Remoteport x%px, rport has invalid ndlp. " "Failing LS Req\n", pnvme_rport); return -ENODEV; } @@ -646,10 +688,10 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, /* Expand print to include key fields. 
*/ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, - "6149 Issue LS Req to DID 0x%06x lport %p, rport %p " - "lsreq%p rqstlen:%d rsplen:%d %pad %pad\n", - ndlp->nlp_DID, - pnvme_lport, pnvme_rport, + "6149 Issue LS Req to DID 0x%06x lport x%px, " + "rport x%px lsreq x%px rqstlen:%d rsplen:%d " + "%pad %pad\n", + ndlp->nlp_DID, pnvme_lport, pnvme_rport, pnvme_lsreq, pnvme_lsreq->rqstlen, pnvme_lsreq->rsplen, &pnvme_lsreq->rqstdma, &pnvme_lsreq->rspdma); @@ -665,8 +707,8 @@ lpfc_nvme_ls_req(struct nvme_fc_local_port *pnvme_lport, if (ret != WQE_SUCCESS) { atomic_inc(&lport->xmt_ls_err); lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, - "6052 EXIT. issue ls wqe failed lport %p, " - "rport %p lsreq%p Status %x DID %x\n", + "6052 EXIT. issue ls wqe failed lport x%px, " + "rport x%px lsreq x%px Status %x DID %x\n", pnvme_lport, pnvme_rport, pnvme_lsreq, ret, ndlp->nlp_DID); lpfc_mbuf_free(vport->phba, bmp->virt, bmp->phys); @@ -723,7 +765,7 @@ lpfc_nvme_ls_abort(struct nvme_fc_local_port *pnvme_lport, /* Expand print to include key fields. */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, - "6040 ENTER. lport %p, rport %p lsreq %p rqstlen:%d " + "6040 ENTER. lport x%px, rport x%px lsreq x%px rqstlen:%d " "rsplen:%d %pad %pad\n", pnvme_lport, pnvme_rport, pnvme_lsreq, pnvme_lsreq->rqstlen, @@ -984,8 +1026,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, if (!lpfc_ncmd->nvmeCmd) { spin_unlock(&lpfc_ncmd->buf_lock); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE | LOG_NVME_IOERR, - "6066 Missing cmpl ptrs: lpfc_ncmd %p, " - "nvmeCmd %p\n", + "6066 Missing cmpl ptrs: lpfc_ncmd x%px, " + "nvmeCmd x%px\n", lpfc_ncmd, lpfc_ncmd->nvmeCmd); /* Release the lpfc_ncmd regardless of the missing elements. */ @@ -998,9 +1040,9 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, idx = lpfc_ncmd->cur_iocbq.hba_wqidx; phba->sli4_hba.hdwq[idx].nvme_cstat.io_cmpls++; - if (vport->localport) { + if (unlikely(status && vport->localport)) { lport = (struct lpfc_nvme_lport *)vport->localport->private; - if (lport && status) { + if (lport) { if (bf_get(lpfc_wcqe_c_xb, wcqe)) atomic_inc(&lport->cmpl_fcp_xb); atomic_inc(&lport->cmpl_fcp_err); @@ -1100,8 +1142,8 @@ lpfc_nvme_io_cmd_wqe_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, if (lpfc_ncmd->result == IOERR_ABORT_REQUESTED) lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, - "6032 Delay Aborted cmd %p " - "nvme cmd %p, xri x%x, " + "6032 Delay Aborted cmd x%px " + "nvme cmd x%px, xri x%x, " "xb %d\n", lpfc_ncmd, nCmd, lpfc_ncmd->cur_iocbq.sli4_xritag, @@ -1140,7 +1182,7 @@ out_err: phba->ktime_last_cmd = lpfc_ncmd->ts_data_nvme; lpfc_nvme_ktime(phba, lpfc_ncmd); } - if (phba->cpucheck_on & LPFC_CHECK_NVME_IO) { + if (unlikely(phba->cpucheck_on & LPFC_CHECK_NVME_IO)) { uint32_t cpu; idx = lpfc_ncmd->cur_iocbq.hba_wqidx; cpu = raw_smp_processor_id(); @@ -1253,6 +1295,9 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, sizeof(uint32_t) * 8); cstat->control_requests++; } + + if (pnode->nlp_nvme_info & NLP_NVME_NSLER) + bf_set(wqe_erp, &wqe->generic.wqe_com, 1); /* * Finish initializing those WQE fields that are independent * of the nvme_cmnd request_buffer @@ -1304,14 +1349,16 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; union lpfc_wqe128 *wqe = &lpfc_ncmd->cur_iocbq.wqe; struct sli4_sge *sgl = lpfc_ncmd->dma_sgl; + struct sli4_hybrid_sgl *sgl_xtra = NULL; struct scatterlist *data_sg; struct sli4_sge *first_data_sgl; struct ulp_bde64 *bde; - 
dma_addr_t physaddr; + dma_addr_t physaddr = 0; uint32_t num_bde = 0; - uint32_t dma_len; + uint32_t dma_len = 0; uint32_t dma_offset = 0; - int nseg, i; + int nseg, i, j; + bool lsp_just_set = false; /* Fix up the command and response DMA stuff. */ lpfc_nvme_adj_fcp_sgls(vport, lpfc_ncmd, nCmd); @@ -1348,6 +1395,9 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, */ nseg = nCmd->sg_cnt; data_sg = nCmd->first_sgl; + + /* for tracking the segment boundaries */ + j = 2; for (i = 0; i < nseg; i++) { if (data_sg == NULL) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, @@ -1356,23 +1406,76 @@ lpfc_nvme_prep_io_dma(struct lpfc_vport *vport, lpfc_ncmd->seg_cnt = 0; return 1; } - physaddr = data_sg->dma_address; - dma_len = data_sg->length; - sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); - sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); - sgl->word2 = le32_to_cpu(sgl->word2); - if ((num_bde + 1) == nseg) + + sgl->word2 = 0; + if ((num_bde + 1) == nseg) { bf_set(lpfc_sli4_sge_last, sgl, 1); - else + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + } else { bf_set(lpfc_sli4_sge_last, sgl, 0); - bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = cpu_to_le32(dma_len); - - dma_offset += dma_len; - data_sg = sg_next(data_sg); - sgl++; + + /* expand the segment */ + if (!lsp_just_set && + !((j + 1) % phba->border_sge_num) && + ((nseg - 1) != i)) { + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq( + phba, lpfc_ncmd); + + if (unlikely(!sgl_xtra)) { + lpfc_ncmd->seg_cnt = 0; + return 1; + } + sgl->addr_lo = cpu_to_le32(putPaddrLow( + sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + sgl_xtra->dma_phys_sgl)); + + } else { + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + } + } + + if (!(bf_get(lpfc_sli4_sge_type, sgl) & + LPFC_SGE_TYPE_LSP)) { + if ((nseg - 1) == i) + bf_set(lpfc_sli4_sge_last, sgl, 1); + + physaddr = data_sg->dma_address; + dma_len = data_sg->length; + sgl->addr_lo = cpu_to_le32( + putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32( + putPaddrHigh(physaddr)); + + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(dma_len); + + dma_offset += dma_len; + data_sg = sg_next(data_sg); + + sgl++; + + lsp_just_set = false; + } else { + sgl->word2 = cpu_to_le32(sgl->word2); + + sgl->sge_len = cpu_to_le32( + phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + i = i - 1; + + lsp_just_set = true; + } + + j++; } if (phba->cfg_enable_pbde) { /* Use PBDE support for first SGL only, offset == 0 */ @@ -1474,7 +1577,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, goto out_fail; } - if (vport->load_flag & FC_UNLOADING) { + if (unlikely(vport->load_flag & FC_UNLOADING)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, "6124 Fail IO, Driver unload\n"); atomic_inc(&lport->xmt_fcp_err); @@ -1505,8 +1608,8 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, ndlp = rport->ndlp; if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_NVME_IOERR, - "6053 Fail IO, ndlp not ready: rport %p " - "ndlp %p, DID x%06x\n", + "6053 Busy IO, ndlp not ready: rport x%px " + "ndlp x%px, DID x%06x\n", rport, ndlp, pnvme_rport->port_id); atomic_inc(&lport->xmt_fcp_err); ret = -EBUSY; @@ -1728,7 +1831,6 @@ lpfc_nvme_fcp_abort(struct 
nvme_fc_local_port *pnvme_lport, struct lpfc_iocbq *abts_buf; struct lpfc_iocbq *nvmereq_wqe; struct lpfc_nvme_fcpreq_priv *freqpriv; - union lpfc_wqe128 *abts_wqe; unsigned long flags; int ret_val; @@ -1758,7 +1860,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Announce entry to new IO submit field. */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, "6002 Abort Request to rport DID x%06x " - "for nvme_fc_req %p\n", + "for nvme_fc_req x%px\n", pnvme_rport->port_id, pnvme_fcreq); @@ -1767,7 +1869,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, */ spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { + if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6139 Driver in reset cleanup - flushing " @@ -1805,8 +1907,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, if (lpfc_nbuf->nvmeCmd != pnvme_fcreq) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6143 NVME req mismatch: " - "lpfc_nbuf %p nvmeCmd %p, " - "pnvme_fcreq %p. Skipping Abort xri x%x\n", + "lpfc_nbuf x%px nvmeCmd x%px, " + "pnvme_fcreq x%px. Skipping Abort xri x%x\n", lpfc_nbuf, lpfc_nbuf->nvmeCmd, pnvme_fcreq, nvmereq_wqe->sli4_xritag); goto out_unlock; @@ -1815,7 +1917,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Don't abort IOs no longer on the pending queue. */ if (!(nvmereq_wqe->iocb_flag & LPFC_IO_ON_TXCMPLQ)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, - "6142 NVME IO req %p not queued - skipping " + "6142 NVME IO req x%px not queued - skipping " "abort req xri x%x\n", pnvme_fcreq, nvmereq_wqe->sli4_xritag); goto out_unlock; @@ -1830,8 +1932,8 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, if (nvmereq_wqe->iocb_flag & LPFC_DRIVER_ABORTED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6144 Outstanding NVME I/O Abort Request " - "still pending on nvme_fcreq %p, " - "lpfc_ncmd %p xri x%x\n", + "still pending on nvme_fcreq x%px, " + "lpfc_ncmd %px xri x%x\n", pnvme_fcreq, lpfc_nbuf, nvmereq_wqe->sli4_xritag); goto out_unlock; @@ -1841,7 +1943,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, if (!abts_buf) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6136 No available abort wqes. Skipping " - "Abts req for nvme_fcreq %p xri x%x\n", + "Abts req for nvme_fcreq x%px xri x%x\n", pnvme_fcreq, nvmereq_wqe->sli4_xritag); goto out_unlock; } @@ -1849,37 +1951,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, /* Ready - mark outstanding as aborted by driver. */ nvmereq_wqe->iocb_flag |= LPFC_DRIVER_ABORTED; - /* Complete prepping the abort wqe and issue to the FW. */ - abts_wqe = &abts_buf->wqe; - - /* WQEs are reused. Clear stale data and set key fields to - * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. - */ - memset(abts_wqe, 0, sizeof(union lpfc_wqe)); - bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); - - /* word 7 */ - bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); - bf_set(wqe_class, &abts_wqe->abort_cmd.wqe_com, - nvmereq_wqe->iocb.ulpClass); - - /* word 8 - tell the FW to abort the IO associated with this - * outstanding exchange ID. - */ - abts_wqe->abort_cmd.wqe_com.abort_tag = nvmereq_wqe->sli4_xritag; - - /* word 9 - this is the iotag for the abts_wqe completion. 
*/ - bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, - abts_buf->iotag); - - /* word 10 */ - bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); - - /* word 11 */ - bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); - bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + lpfc_nvme_prep_abort_wqe(abts_buf, nvmereq_wqe->sli4_xritag, 0); /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abts_buf->iocb_flag |= LPFC_IO_NVME; @@ -1892,7 +1964,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, if (ret_val) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_ABTS, "6137 Failed abts issue_wqe with status x%x " - "for nvme_fcreq %p.\n", + "for nvme_fcreq x%px.\n", ret_val, pnvme_fcreq); lpfc_sli_release_iocbq(phba, abts_buf); return; @@ -1913,6 +1985,8 @@ out_unlock: /* Declare and initialization an instance of the FC NVME template. */ static struct nvme_fc_port_template lpfc_nvme_template = { + .module = THIS_MODULE, + /* initiator-based functions */ .localport_delete = lpfc_nvme_localport_delete, .remoteport_delete = lpfc_nvme_remoteport_delete, @@ -1982,7 +2056,7 @@ lpfc_get_nvme_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, sgl->word2 = cpu_to_le32(sgl->word2); /* Fill in word 3 / sgl_len during cmd submission */ - /* Initialize WQE */ + /* Initialize 64 bytes only */ memset(wqe, 0, sizeof(union lpfc_wqe)); if (lpfc_ndlp_check_qdepth(phba, ndlp)) { @@ -2021,18 +2095,18 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd) lpfc_ncmd->flags &= ~LPFC_SBUF_BUMP_QDEPTH; qp = lpfc_ncmd->hdwq; - if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) { + if (unlikely(lpfc_ncmd->flags & LPFC_SBUF_XBUSY)) { lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, "6310 XB release deferred for " "ox_id x%x on reqtag x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->cur_iocbq.iotag); - spin_lock_irqsave(&qp->abts_nvme_buf_list_lock, iflag); + spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); list_add_tail(&lpfc_ncmd->list, - &qp->lpfc_abts_nvme_buf_list); + &qp->lpfc_abts_io_buf_list); qp->abts_nvme_io_bufs++; - spin_unlock_irqrestore(&qp->abts_nvme_buf_list_lock, iflag); + spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); } else lpfc_release_io_buf(phba, (struct lpfc_io_buf *)lpfc_ncmd, qp); } @@ -2076,12 +2150,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) */ lpfc_nvme_template.max_sgl_segments = phba->cfg_nvme_seg_cnt + 1; - /* Advertise how many hw queues we support based on fcp_io_sched */ - if (phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) - lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; - else - lpfc_nvme_template.max_hw_queues = - phba->sli4_hba.num_present_cpu; + /* Advertise how many hw queues we support based on cfg_hdw_queue, + * which will not exceed cpu count. 
+ */ + lpfc_nvme_template.max_hw_queues = phba->cfg_hdw_queue; if (!IS_ENABLED(CONFIG_NVME_FC)) return ret; @@ -2095,8 +2167,8 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport) if (!ret) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC, "6005 Successfully registered local " - "NVME port num %d, localP %p, private %p, " - "sg_seg %d\n", + "NVME port num %d, localP x%px, private " + "x%px, sg_seg %d\n", localport->port_num, localport, localport->private, lpfc_nvme_template.max_sgl_segments); @@ -2157,14 +2229,14 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, if (unlikely(!ret)) { pending = 0; for (i = 0; i < phba->cfg_hdw_queue; i++) { - pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; + pring = phba->sli4_hba.hdwq[i].io_wq->pring; if (!pring) continue; if (pring->txcmplq_cnt) pending += pring->txcmplq_cnt; } lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_IOERR, - "6176 Lport %p Localport %p wait " + "6176 Lport x%px Localport x%px wait " "timed out. Pending %d. Renewing.\n", lport, vport->localport, pending); continue; @@ -2172,7 +2244,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport, break; } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_IOERR, - "6177 Lport %p Localport %p Complete Success\n", + "6177 Lport x%px Localport x%px Complete Success\n", lport, vport->localport); } #endif @@ -2203,7 +2275,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport) lport = (struct lpfc_nvme_lport *)localport->private; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, - "6011 Destroying NVME localport %p\n", + "6011 Destroying NVME localport x%px\n", localport); /* lport's rport list is clear. Unregister @@ -2253,12 +2325,12 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport) lport = (struct lpfc_nvme_lport *)localport->private; if (!lport) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_NVME, - "6171 Update NVME fail. localP %p, No lport\n", + "6171 Update NVME fail. localP x%px, No lport\n", localport); return; } lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, - "6012 Update NVME lport %p did x%x\n", + "6012 Update NVME lport x%px did x%x\n", localport, vport->fc_myDID); localport->port_id = vport->fc_myDID; @@ -2268,7 +2340,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport) localport->port_role = FC_PORT_ROLE_NVME_INITIATOR; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, - "6030 bound lport %p to DID x%06x\n", + "6030 bound lport x%px to DID x%06x\n", lport, localport->port_id); #endif } @@ -2317,9 +2389,13 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_lock_irq(&vport->phba->hbalock); oldrport = lpfc_ndlp_get_nrport(ndlp); - spin_unlock_irq(&vport->phba->hbalock); - if (!oldrport) + if (oldrport) { + prev_ndlp = oldrport->ndlp; + spin_unlock_irq(&vport->phba->hbalock); + } else { + spin_unlock_irq(&vport->phba->hbalock); lpfc_nlp_get(ndlp); + } ret = nvme_fc_register_remoteport(localport, &rpinfo, &remote_port); if (!ret) { @@ -2338,25 +2414,34 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* New remoteport record does not guarantee valid * host private memory area. */ - prev_ndlp = oldrport->ndlp; if (oldrport == remote_port->private) { /* Same remoteport - ndlp should match. * Just reuse. 
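	 * (Condensed, the rebind decision added here is roughly:
	 *
	 *	if (oldrport == remote_port->private && prev_ndlp == ndlp)
	 *		return 0;	// same binding, nothing to redo
	 *
	 * after a node swap prev_ndlp differs from ndlp, so the
	 * ndlp<->rport association must be re-established below.)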
*/ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NVME_DISC, - "6014 Rebinding lport to " - "remoteport %p wwpn 0x%llx, " - "Data: x%x x%x %p %p x%x x%06x\n", + "6014 Rebind lport to current " + "remoteport x%px wwpn 0x%llx, " + "Data: x%x x%x x%px x%px x%x " + " x%06x\n", remote_port, remote_port->port_name, remote_port->port_id, remote_port->port_role, - prev_ndlp, + oldrport->ndlp, ndlp, ndlp->nlp_type, ndlp->nlp_DID); - return 0; + + /* It's a complete rebind only if the driver + * is registering with the same ndlp. Otherwise + * the driver likely executed a node swap + * prior to this registration and the ndlp to + * remoteport binding needs to be redone. + */ + if (prev_ndlp == ndlp) + return 0; + } /* Sever the ndlp<->rport association @@ -2390,10 +2475,10 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) spin_unlock_irq(&vport->phba->hbalock); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC | LOG_NODE, - "6022 Binding new rport to " - "lport %p Remoteport %p rport %p WWNN 0x%llx, " + "6022 Bind lport x%px to remoteport x%px " + "rport x%px WWNN 0x%llx, " "Rport WWPN 0x%llx DID " - "x%06x Role x%x, ndlp %p prev_ndlp %p\n", + "x%06x Role x%x, ndlp %p prev_ndlp x%px\n", lport, remote_port, rport, rpinfo.node_name, rpinfo.port_name, rpinfo.port_id, rpinfo.port_role, @@ -2423,20 +2508,23 @@ void lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { #if (IS_ENABLED(CONFIG_NVME_FC)) - struct lpfc_nvme_rport *rport; - struct nvme_fc_remote_port *remoteport; + struct lpfc_nvme_rport *nrport; + struct nvme_fc_remote_port *remoteport = NULL; - rport = ndlp->nrport; + spin_lock_irq(&vport->phba->hbalock); + nrport = lpfc_ndlp_get_nrport(ndlp); + if (nrport) + remoteport = nrport->remoteport; + spin_unlock_irq(&vport->phba->hbalock); lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6170 Rescan NPort DID x%06x type x%x " - "state x%x rport %p\n", - ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, rport); - if (!rport) - goto input_err; - remoteport = rport->remoteport; - if (!remoteport) - goto input_err; + "state x%x nrport x%px remoteport x%px\n", + ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_state, + nrport, remoteport); + + if (!nrport || !remoteport) + goto rescan_exit; /* Only rescan if we are an NVME target in the MAPPED state */ if (remoteport->port_role & FC_PORT_ROLE_NVME_DISCOVERY && @@ -2449,10 +2537,10 @@ lpfc_nvme_rescan_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) ndlp->nlp_DID, remoteport->port_state); } return; -input_err: - lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, - "6169 State error: lport %p, rport%p FCID x%06x\n", - vport->localport, ndlp->rport, ndlp->nlp_DID); + rescan_exit: + lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, + "6169 Skip NVME Rport Rescan, NVME remoteport " + "unregistered\n"); #endif } @@ -2499,7 +2587,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) goto input_err; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, - "6033 Unreg nvme remoteport %p, portname x%llx, " + "6033 Unreg nvme remoteport x%px, portname x%llx, " "port_id x%06x, portstate x%x port type x%x\n", remoteport, remoteport->port_name, remoteport->port_id, remoteport->port_state, @@ -2537,7 +2625,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) input_err: #endif lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC, - "6168 State error: lport %p, rport%p FCID x%06x\n", + "6168 State error: lport x%px, rport x%px FCID x%06x\n", vport->localport, 
ndlp->rport, ndlp->nlp_DID); } @@ -2545,6 +2633,7 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort * @phba: pointer to lpfc hba data structure. * @axri: pointer to the fcp xri abort wcqe structure. + * @lpfc_ncmd: The nvme job structure for the request being aborted. * * This routine is invoked by the worker thread to process a SLI4 fast-path * NVME aborted xri. Aborted NVME IO commands are completed to the transport @@ -2552,59 +2641,33 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) **/ void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri, int idx) + struct sli4_wcqe_xri_aborted *axri, + struct lpfc_io_buf *lpfc_ncmd) { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); - struct lpfc_io_buf *lpfc_ncmd, *next_lpfc_ncmd; struct nvmefc_fcp_req *nvme_cmd = NULL; - struct lpfc_nodelist *ndlp; - struct lpfc_sli4_hdw_queue *qp; - unsigned long iflag = 0; + struct lpfc_nodelist *ndlp = lpfc_ncmd->ndlp; - if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) - return; - qp = &phba->sli4_hba.hdwq[idx]; - spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&qp->abts_nvme_buf_list_lock); - list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd, - &qp->lpfc_abts_nvme_buf_list, list) { - if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) { - list_del_init(&lpfc_ncmd->list); - qp->abts_nvme_io_bufs--; - lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY; - lpfc_ncmd->status = IOSTAT_SUCCESS; - spin_unlock(&qp->abts_nvme_buf_list_lock); - - spin_unlock_irqrestore(&phba->hbalock, iflag); - ndlp = lpfc_ncmd->ndlp; - if (ndlp) - lpfc_sli4_abts_err_handler(phba, ndlp, axri); - - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6311 nvme_cmd %p xri x%x tag x%x " - "abort complete and xri released\n", - lpfc_ncmd->nvmeCmd, xri, - lpfc_ncmd->cur_iocbq.iotag); - - /* Aborted NVME commands are required to not complete - * before the abort exchange command fully completes. - * Once completed, it is available via the put list. - */ - if (lpfc_ncmd->nvmeCmd) { - nvme_cmd = lpfc_ncmd->nvmeCmd; - nvme_cmd->done(nvme_cmd); - lpfc_ncmd->nvmeCmd = NULL; - } - lpfc_release_nvme_buf(phba, lpfc_ncmd); - return; - } - } - spin_unlock(&qp->abts_nvme_buf_list_lock); - spin_unlock_irqrestore(&phba->hbalock, iflag); - lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6312 XRI Aborted xri x%x not found\n", xri); + if (ndlp) + lpfc_sli4_abts_err_handler(phba, ndlp, axri); + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, + "6311 nvme_cmd %p xri x%x tag x%x abort complete and " + "xri released\n", + lpfc_ncmd->nvmeCmd, xri, + lpfc_ncmd->cur_iocbq.iotag); + + /* Aborted NVME commands are required to not complete + * before the abort exchange command fully completes. + * Once completed, it is available via the put list. + */ + if (lpfc_ncmd->nvmeCmd) { + nvme_cmd = lpfc_ncmd->nvmeCmd; + nvme_cmd->done(nvme_cmd); + lpfc_ncmd->nvmeCmd = NULL; + } + lpfc_release_nvme_buf(phba, lpfc_ncmd); } /** @@ -2626,13 +2689,13 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) if (phba->sli_rev < LPFC_SLI_REV4 || !phba->sli4_hba.hdwq) return; - /* Cycle through all NVME rings and make sure all outstanding + /* Cycle through all IO rings and make sure all outstanding * WQEs have been removed from the txcmplqs. 
*/ for (i = 0; i < phba->cfg_hdw_queue; i++) { - if (!phba->sli4_hba.hdwq[i].nvme_wq) + if (!phba->sli4_hba.hdwq[i].io_wq) continue; - pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; + pring = phba->sli4_hba.hdwq[i].io_wq->pring; if (!pring) continue; @@ -2653,3 +2716,50 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba) } } } + +void +lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn) +{ +#if (IS_ENABLED(CONFIG_NVME_FC)) + struct lpfc_io_buf *lpfc_ncmd; + struct nvmefc_fcp_req *nCmd; + struct lpfc_nvme_fcpreq_priv *freqpriv; + + if (!pwqeIn->context1) { + lpfc_sli_release_iocbq(phba, pwqeIn); + return; + } + /* For abort iocb just return, IO iocb will do a done call */ + if (bf_get(wqe_cmnd, &pwqeIn->wqe.gen_req.wqe_com) == + CMD_ABORT_XRI_CX) { + lpfc_sli_release_iocbq(phba, pwqeIn); + return; + } + lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1; + + spin_lock(&lpfc_ncmd->buf_lock); + if (!lpfc_ncmd->nvmeCmd) { + spin_unlock(&lpfc_ncmd->buf_lock); + lpfc_release_nvme_buf(phba, lpfc_ncmd); + return; + } + + nCmd = lpfc_ncmd->nvmeCmd; + lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR, + "6194 NVME Cancel xri %x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag); + + nCmd->transferred_length = 0; + nCmd->rcv_rsplen = 0; + nCmd->status = NVME_SC_INTERNAL; + freqpriv = nCmd->private; + freqpriv->nvme_buf = NULL; + lpfc_ncmd->nvmeCmd = NULL; + + spin_unlock(&lpfc_ncmd->buf_lock); + nCmd->done(nCmd); + + /* Call release with XB=1 to queue the IO into the abort list. */ + lpfc_release_nvme_buf(phba, lpfc_ncmd); +#endif +} diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index faa596f9e861..9dc9afe1c255 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -378,13 +378,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) int cpu; unsigned long iflag; - if (ctxp->txrdy) { - dma_pool_free(phba->txrdy_payload_pool, ctxp->txrdy, - ctxp->txrdy_phys); - ctxp->txrdy = NULL; - ctxp->txrdy_phys = 0; - } - if (ctxp->state == LPFC_NVMET_STE_FREE) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, "6411 NVMET free, already free IO x%x: %d %d\n", @@ -430,7 +423,6 @@ lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba, struct lpfc_nvmet_ctxbuf *ctx_buf) ctxp = (struct lpfc_nvmet_rcv_ctx *)ctx_buf->context; ctxp->wqeq = NULL; - ctxp->txrdy = NULL; ctxp->offset = 0; ctxp->phba = phba; ctxp->size = size; @@ -1026,7 +1018,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport, * WQE release CQE */ ctxp->flag |= LPFC_NVMET_DEFER_WQFULL; - wq = ctxp->hdwq->nvme_wq; + wq = ctxp->hdwq->io_wq; pring = wq->pring; spin_lock_irqsave(&pring->ring_lock, iflags); list_add_tail(&nvmewqeq->list, &wq->wqfull_list); @@ -1104,7 +1096,7 @@ lpfc_nvmet_xmt_fcp_abort(struct nvmet_fc_target_port *tgtport, spin_unlock_irqrestore(&ctxp->ctxlock, flags); lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, ctxp->oxid); - wq = ctxp->hdwq->nvme_wq; + wq = ctxp->hdwq->io_wq; lpfc_nvmet_wqfull_flush(phba, wq, ctxp); return; } @@ -1437,7 +1429,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba) infop = lpfc_get_ctx_list(phba, i, j); lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_INIT, "6408 TOTAL NVMET ctx for CPU %d " - "MRQ %d: cnt %d nextcpu %p\n", + "MRQ %d: cnt %d nextcpu x%px\n", i, j, infop->nvmet_ctx_list_cnt, infop->nvmet_ctx_next_cpu); } @@ -1500,7 +1492,7 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_INFO, LOG_NVME_DISC, "6026 Registered NVME " - "targetport: %p, private %p " + 
"targetport: x%px, private x%px " "portnm %llx nodenm %llx segs %d qs %d\n", phba->targetport, tgtp, pinfo.port_name, pinfo.node_name, @@ -1555,7 +1547,7 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba) return 0; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, - "6007 Update NVMET port %p did x%x\n", + "6007 Update NVMET port x%px did x%x\n", phba->targetport, vport->fc_myDID); phba->targetport->port_id = vport->fc_myDID; @@ -1790,12 +1782,8 @@ lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport, lpfc_nvmet_defer_release(phba, ctxp); spin_unlock_irqrestore(&ctxp->ctxlock, iflag); } - if (ctxp->state == LPFC_NVMET_STE_RCV) - lpfc_nvmet_unsol_fcp_issue_abort(phba, ctxp, ctxp->sid, - ctxp->oxid); - else - lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, - ctxp->oxid); + lpfc_nvmet_sol_fcp_issue_abort(phba, ctxp, ctxp->sid, + ctxp->oxid); lpfc_sli4_seq_abort_rsp(vport, fc_hdr, 1); return 0; @@ -1922,7 +1910,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) if (phba->targetport) { tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { - wq = phba->sli4_hba.hdwq[qidx].nvme_wq; + wq = phba->sli4_hba.hdwq[qidx].io_wq; lpfc_nvmet_wqfull_flush(phba, wq, NULL); } tgtp->tport_unreg_cmp = &tport_unreg_cmp; @@ -1930,7 +1918,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba) if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp, msecs_to_jiffies(LPFC_NVMET_WAIT_TMO))) lpfc_printf_log(phba, KERN_ERR, LOG_NVME, - "6179 Unreg targetport %p timeout " + "6179 Unreg targetport x%px timeout " "reached.\n", phba->targetport); lpfc_nvmet_cleanup_io_context(phba); } @@ -1962,12 +1950,10 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, uint32_t *payload; uint32_t size, oxid, sid, rc; - fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); - oxid = be16_to_cpu(fc_hdr->fh_ox_id); - if (!phba->targetport) { + if (!nvmebuf || !phba->targetport) { lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6154 LS Drop IO x%x\n", oxid); + "6154 LS Drop IO\n"); oxid = 0; size = 0; sid = 0; @@ -1975,6 +1961,9 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, goto dropit; } + fc_hdr = (struct fc_frame_header *)(nvmebuf->hbuf.virt); + oxid = be16_to_cpu(fc_hdr->fh_ox_id); + tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; payload = (uint32_t *)(nvmebuf->dbuf.virt); size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl); @@ -2330,7 +2319,6 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba, ctxp->state, ctxp->entry_cnt, ctxp->oxid); } ctxp->wqeq = NULL; - ctxp->txrdy = NULL; ctxp->offset = 0; ctxp->phba = phba; ctxp->size = size; @@ -2405,6 +2393,11 @@ lpfc_nvmet_unsol_ls_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, d_buf = piocb->context2; nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf); + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "3015 LS Drop IO\n"); + return; + } if (phba->nvmet_support == 0) { lpfc_in_buf_free(phba, &nvmebuf->dbuf); return; @@ -2433,6 +2426,11 @@ lpfc_nvmet_unsol_fcp_event(struct lpfc_hba *phba, uint64_t isr_timestamp, uint8_t cqflag) { + if (!nvmebuf) { + lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, + "3167 NVMET FCP Drop IO\n"); + return; + } if (phba->nvmet_support == 0) { lpfc_rq_buf_free(phba, &nvmebuf->hbuf); return; @@ -2599,7 +2597,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, struct scatterlist *sgel; union lpfc_wqe128 *wqe; struct ulp_bde64 *bde; - uint32_t *txrdy; 
dma_addr_t physaddr; int i, cnt; int do_pbde; @@ -2761,23 +2758,11 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, &lpfc_treceive_cmd_template.words[3], sizeof(uint32_t) * 9); - /* Words 0 - 2 : The first sg segment */ - txrdy = dma_pool_alloc(phba->txrdy_payload_pool, - GFP_KERNEL, &physaddr); - if (!txrdy) { - lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR, - "6041 Bad txrdy buffer: oxid x%x\n", - ctxp->oxid); - return NULL; - } - ctxp->txrdy = txrdy; - ctxp->txrdy_phys = physaddr; - wqe->fcp_treceive.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64; - wqe->fcp_treceive.bde.tus.f.bdeSize = TXRDY_PAYLOAD_LEN; - wqe->fcp_treceive.bde.addrLow = - cpu_to_le32(putPaddrLow(physaddr)); - wqe->fcp_treceive.bde.addrHigh = - cpu_to_le32(putPaddrHigh(physaddr)); + /* Words 0 - 2 : First SGE is skipped, set invalid BDE type */ + wqe->fcp_treceive.bde.tus.f.bdeFlags = LPFC_SGE_TYPE_SKIP; + wqe->fcp_treceive.bde.tus.f.bdeSize = 0; + wqe->fcp_treceive.bde.addrLow = 0; + wqe->fcp_treceive.bde.addrHigh = 0; /* Word 4 */ wqe->fcp_treceive.relative_offset = ctxp->offset; @@ -2812,17 +2797,13 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* Word 12 */ wqe->fcp_tsend.fcp_data_len = rsp->transfer_length; - /* Setup 1 TXRDY and 1 SKIP SGE */ - txrdy[0] = 0; - txrdy[1] = cpu_to_be32(rsp->transfer_length); - txrdy[2] = 0; - - sgl->addr_hi = putPaddrHigh(physaddr); - sgl->addr_lo = putPaddrLow(physaddr); + /* Setup 2 SKIP SGEs */ + sgl->addr_hi = 0; + sgl->addr_lo = 0; sgl->word2 = 0; - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_SKIP); sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = cpu_to_le32(TXRDY_PAYLOAD_LEN); + sgl->sge_len = 0; sgl++; sgl->addr_hi = 0; sgl->addr_lo = 0; @@ -3113,7 +3094,7 @@ lpfc_nvmet_xmt_ls_abort_cmp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdwqe, atomic_inc(&tgtp->xmt_ls_abort_cmpl); lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS, - "6083 Abort cmpl: ctx %p WCQE:%08x %08x %08x %08x\n", + "6083 Abort cmpl: ctx x%px WCQE:%08x %08x %08x %08x\n", ctxp, wcqe->word0, wcqe->total_data_placed, result, wcqe->word3); @@ -3243,9 +3224,9 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, { struct lpfc_nvmet_tgtport *tgtp; struct lpfc_iocbq *abts_wqeq; - union lpfc_wqe128 *abts_wqe; struct lpfc_nodelist *ndlp; unsigned long flags; + u8 opt; int rc; tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private; @@ -3284,8 +3265,8 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, return 0; } abts_wqeq = ctxp->abort_wqeq; - abts_wqe = &abts_wqeq->wqe; ctxp->state = LPFC_NVMET_STE_ABORT; + opt = (ctxp->flag & LPFC_NVMET_ABTS_RCV) ? INHIBIT_ABORT : 0; spin_unlock_irqrestore(&ctxp->ctxlock, flags); /* Announce entry to new IO submit field. */ @@ -3299,7 +3280,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, */ spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_NVME_IOQ_FLUSH) { + if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, flags); atomic_inc(&tgtp->xmt_abort_rsp_error); lpfc_printf_log(phba, KERN_ERR, LOG_NVME, @@ -3331,40 +3312,12 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba, /* Ready - mark outstanding as aborted by driver. */ abts_wqeq->iocb_flag |= LPFC_DRIVER_ABORTED; - /* WQEs are reused. Clear stale data and set key fields to - * zero like ia, iaab, iaar, xri_tag, and ctxt_tag. 
- */ - memset(abts_wqe, 0, sizeof(union lpfc_wqe)); - - /* word 3 */ - bf_set(abort_cmd_criteria, &abts_wqe->abort_cmd, T_XRI_TAG); - - /* word 7 */ - bf_set(wqe_ct, &abts_wqe->abort_cmd.wqe_com, 0); - bf_set(wqe_cmnd, &abts_wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX); - - /* word 8 - tell the FW to abort the IO associated with this - * outstanding exchange ID. - */ - abts_wqe->abort_cmd.wqe_com.abort_tag = ctxp->wqeq->sli4_xritag; - - /* word 9 - this is the iotag for the abts_wqe completion. */ - bf_set(wqe_reqtag, &abts_wqe->abort_cmd.wqe_com, - abts_wqeq->iotag); - - /* word 10 */ - bf_set(wqe_qosd, &abts_wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_lenloc, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE); - - /* word 11 */ - bf_set(wqe_cmd_type, &abts_wqe->abort_cmd.wqe_com, OTHER_COMMAND); - bf_set(wqe_wqec, &abts_wqe->abort_cmd.wqe_com, 1); - bf_set(wqe_cqid, &abts_wqe->abort_cmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); + lpfc_nvme_prep_abort_wqe(abts_wqeq, ctxp->wqeq->sli4_xritag, opt); /* ABTS WQE must go to the same WQ as the WQE to be aborted */ abts_wqeq->hba_wqidx = ctxp->wqeq->hba_wqidx; abts_wqeq->wqe_cmpl = lpfc_nvmet_sol_fcp_abort_cmp; - abts_wqeq->iocb_cmpl = 0; + abts_wqeq->iocb_cmpl = NULL; abts_wqeq->iocb_flag |= LPFC_IO_NVME; abts_wqeq->context2 = ctxp; abts_wqeq->vport = phba->pport; @@ -3499,7 +3452,7 @@ lpfc_nvmet_unsol_ls_issue_abort(struct lpfc_hba *phba, spin_lock_irqsave(&phba->hbalock, flags); abts_wqeq->wqe_cmpl = lpfc_nvmet_xmt_ls_abort_cmp; - abts_wqeq->iocb_cmpl = 0; + abts_wqeq->iocb_cmpl = NULL; abts_wqeq->iocb_flag |= LPFC_IO_NVME_LS; rc = lpfc_sli4_issue_wqe(phba, ctxp->hdwq, abts_wqeq); spin_unlock_irqrestore(&phba->hbalock, flags); diff --git a/drivers/scsi/lpfc/lpfc_nvmet.h b/drivers/scsi/lpfc/lpfc_nvmet.h index 8ff67deac10a..b80b1639b9a7 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.h +++ b/drivers/scsi/lpfc/lpfc_nvmet.h @@ -112,9 +112,7 @@ struct lpfc_nvmet_rcv_ctx { struct lpfc_hba *phba; struct lpfc_iocbq *wqeq; struct lpfc_iocbq *abort_wqeq; - dma_addr_t txrdy_phys; spinlock_t ctxlock; /* protect flag access */ - uint32_t *txrdy; uint32_t sid; uint32_t offset; uint16_t oxid; diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index f9df800e7067..2c7e0b22db2f 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -53,8 +53,6 @@ #define LPFC_RESET_WAIT 2 #define LPFC_ABORT_WAIT 2 -int _dump_buf_done = 1; - static char *dif_op_str[] = { "PROT_NORMAL", "PROT_READ_INSERT", @@ -89,63 +87,6 @@ lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *psb); static int lpfc_prot_group_type(struct lpfc_hba *phba, struct scsi_cmnd *sc); -static void -lpfc_debug_save_data(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) -{ - void *src, *dst; - struct scatterlist *sgde = scsi_sglist(cmnd); - - if (!_dump_buf_data) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9050 BLKGRD: ERROR %s _dump_buf_data is NULL\n", - __func__); - return; - } - - - if (!sgde) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9051 BLKGRD: ERROR: data scatterlist is null\n"); - return; - } - - dst = (void *) _dump_buf_data; - while (sgde) { - src = sg_virt(sgde); - memcpy(dst, src, sgde->length); - dst += sgde->length; - sgde = sg_next(sgde); - } -} - -static void -lpfc_debug_save_dif(struct lpfc_hba *phba, struct scsi_cmnd *cmnd) -{ - void *src, *dst; - struct scatterlist *sgde = scsi_prot_sglist(cmnd); - - if (!_dump_buf_dif) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9052 BLKGRD: ERROR %s _dump_buf_data is NULL\n", - __func__); - 
return; - } - - if (!sgde) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, - "9053 BLKGRD: ERROR: prot scatterlist is null\n"); - return; - } - - dst = _dump_buf_dif; - while (sgde) { - src = sg_virt(sgde); - memcpy(dst, src, sgde->length); - dst += sgde->length; - sgde = sg_next(sgde); - } -} - static inline unsigned lpfc_cmd_blksize(struct scsi_cmnd *sc) { @@ -193,21 +134,21 @@ lpfc_sli4_set_rsp_sgl_last(struct lpfc_hba *phba, /** * lpfc_update_stats - Update statistical data for the command completion - * @phba: Pointer to HBA object. + * @vport: The virtual port on which this call is executing. * @lpfc_cmd: lpfc scsi command object pointer. * * This function is called when there is a command completion and this * function updates the statistical data for the command completion. **/ static void -lpfc_update_stats(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) +lpfc_update_stats(struct lpfc_vport *vport, struct lpfc_io_buf *lpfc_cmd) { + struct lpfc_hba *phba = vport->phba; struct lpfc_rport_data *rdata; struct lpfc_nodelist *pnode; struct scsi_cmnd *cmd = lpfc_cmd->pCmd; unsigned long flags; - struct Scsi_Host *shost = cmd->device->host; - struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; + struct Scsi_Host *shost = lpfc_shost_from_vport(vport); unsigned long latency; int i; @@ -537,29 +478,32 @@ lpfc_sli4_vport_delete_fcp_xri_aborted(struct lpfc_vport *vport) for (idx = 0; idx < phba->cfg_hdw_queue; idx++) { qp = &phba->sli4_hba.hdwq[idx]; - spin_lock(&qp->abts_scsi_buf_list_lock); + spin_lock(&qp->abts_io_buf_list_lock); list_for_each_entry_safe(psb, next_psb, - &qp->lpfc_abts_scsi_buf_list, list) { + &qp->lpfc_abts_io_buf_list, list) { + if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) + continue; + if (psb->rdata && psb->rdata->pnode && psb->rdata->pnode->vport == vport) psb->rdata = NULL; } - spin_unlock(&qp->abts_scsi_buf_list_lock); + spin_unlock(&qp->abts_io_buf_list_lock); } spin_unlock_irqrestore(&phba->hbalock, iflag); } /** - * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort + * lpfc_sli4_io_xri_aborted - Fast-path process of fcp xri abort * @phba: pointer to lpfc hba data structure. * @axri: pointer to the fcp xri abort wcqe structure. * * This routine is invoked by the worker thread to process a SLI4 fast-path - * FCP aborted xri. + * FCP or NVME aborted xri. 
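/*
 * Sketch of the dispatch this rename introduces: SCSI and NVME buffers
 * now share one per-queue abort list, and the I/O flag picks the
 * completion path for an aborted exchange. Everything here is
 * illustrative; only the flag-based split mirrors the hunk below.
 */
#include <stdio.h>

#define TOY_IO_NVME 0x1u

struct abts_buf { unsigned int flags; unsigned int xri; };

static void nvme_xri_aborted(struct abts_buf *buf)
{
	printf("xri %u: NVME abort completion\n", buf->xri);
}

static void scsi_xri_aborted(struct abts_buf *buf)
{
	printf("xri %u: SCSI abort completion\n", buf->xri);
}

/* One walk over the shared abort list, one branch per protocol. */
static void io_xri_aborted(struct abts_buf *buf)
{
	if (buf->flags & TOY_IO_NVME)
		nvme_xri_aborted(buf);
	else
		scsi_xri_aborted(buf);
}

int main(void)
{
	struct abts_buf n = { .flags = TOY_IO_NVME, .xri = 10 };
	struct abts_buf s = { .flags = 0, .xri = 11 };

	io_xri_aborted(&n);
	io_xri_aborted(&s);
	return 0;
}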
**/ void -lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri, int idx) +lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri, int idx) { uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri); uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri); @@ -577,16 +521,23 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, qp = &phba->sli4_hba.hdwq[idx]; spin_lock_irqsave(&phba->hbalock, iflag); - spin_lock(&qp->abts_scsi_buf_list_lock); + spin_lock(&qp->abts_io_buf_list_lock); list_for_each_entry_safe(psb, next_psb, - &qp->lpfc_abts_scsi_buf_list, list) { + &qp->lpfc_abts_io_buf_list, list) { if (psb->cur_iocbq.sli4_xritag == xri) { - list_del(&psb->list); - qp->abts_scsi_io_bufs--; - psb->exch_busy = 0; + list_del_init(&psb->list); + psb->flags &= ~LPFC_SBUF_XBUSY; psb->status = IOSTAT_SUCCESS; - spin_unlock( - &qp->abts_scsi_buf_list_lock); + if (psb->cur_iocbq.iocb_flag & LPFC_IO_NVME) { + qp->abts_nvme_io_bufs--; + spin_unlock(&qp->abts_io_buf_list_lock); + spin_unlock_irqrestore(&phba->hbalock, iflag); + lpfc_sli4_nvme_xri_aborted(phba, axri, psb); + return; + } + qp->abts_scsi_io_bufs--; + spin_unlock(&qp->abts_io_buf_list_lock); + if (psb->rdata && psb->rdata->pnode) ndlp = psb->rdata->pnode; else @@ -605,17 +556,17 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba, return; } } - spin_unlock(&qp->abts_scsi_buf_list_lock); + spin_unlock(&qp->abts_io_buf_list_lock); for (i = 1; i <= phba->sli.last_iotag; i++) { iocbq = phba->sli.iocbq_lookup[i]; - if (!(iocbq->iocb_flag & LPFC_IO_FCP) || - (iocbq->iocb_flag & LPFC_IO_LIBDFC)) + if (!(iocbq->iocb_flag & LPFC_IO_FCP) || + (iocbq->iocb_flag & LPFC_IO_LIBDFC)) continue; if (iocbq->sli4_xritag != xri) continue; psb = container_of(iocbq, struct lpfc_io_buf, cur_iocbq); - psb->exch_busy = 0; + psb->flags &= ~LPFC_SBUF_XBUSY; spin_unlock_irqrestore(&phba->hbalock, iflag); if (!list_empty(&pring->txq)) lpfc_worker_wake_up(phba); @@ -685,8 +636,9 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, IOCB_t *iocb; dma_addr_t pdma_phys_fcp_rsp; dma_addr_t pdma_phys_fcp_cmd; - uint32_t sgl_size, cpu, idx; + uint32_t cpu, idx; int tag; + struct fcp_cmd_rsp_buf *tmp = NULL; cpu = raw_smp_processor_id(); if (cmnd && phba->cfg_fcp_io_sched == LPFC_FCP_SCHED_BY_HDWQ) { @@ -704,9 +656,6 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, return NULL; } - sgl_size = phba->cfg_sg_dma_buf_size - - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp)); - /* Setup key fields in buffer that may have been changed * if other protocols used this buffer. */ @@ -721,9 +670,12 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, #ifdef CONFIG_SCSI_LPFC_DEBUG_FS lpfc_cmd->prot_data_type = 0; #endif - lpfc_cmd->fcp_cmnd = (lpfc_cmd->data + sgl_size); - lpfc_cmd->fcp_rsp = (struct fcp_rsp *)((uint8_t *)lpfc_cmd->fcp_cmnd + - sizeof(struct fcp_cmnd)); + tmp = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd); + if (!tmp) + return NULL; + + lpfc_cmd->fcp_cmnd = tmp->fcp_cmnd; + lpfc_cmd->fcp_rsp = tmp->fcp_rsp; /* * The first two SGEs are the FCP_CMD and FCP_RSP. @@ -731,7 +683,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, * first two and leave the rest for queuecommand. 
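/*
 * The comment above describes a fixed SGL prefix: entry 0 carries the
 * FCP command, entry 1 the FCP response, and data mapping starts at
 * entry 2. With this change the cmd/rsp pair comes from a
 * per-hardware-queue pool instead of the tail of the data allocation.
 * A toy layout routine showing the reserved prefix (all names
 * invented):
 */
typedef unsigned long long toy_dma_t;

struct toy_sge { toy_dma_t addr; unsigned int len; };

static void prep_sgl_prefix(struct toy_sge *sgl,
			    toy_dma_t cmd_dma, unsigned int cmd_len,
			    toy_dma_t rsp_dma, unsigned int rsp_len)
{
	sgl[0].addr = cmd_dma;	/* FCP_CMND payload, from the pool */
	sgl[0].len  = cmd_len;
	sgl[1].addr = rsp_dma;	/* FCP_RSP payload, same pool entry */
	sgl[1].len  = rsp_len;
	/* data SGEs are filled from sgl[2] at queuecommand time */
}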
*/ sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl; - pdma_phys_fcp_cmd = (lpfc_cmd->dma_handle + sgl_size); + pdma_phys_fcp_cmd = tmp->fcp_cmd_rsp_dma_handle; sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd)); sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd)); sgl->word2 = le32_to_cpu(sgl->word2); @@ -834,12 +786,12 @@ lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *psb) psb->prot_seg_cnt = 0; qp = psb->hdwq; - if (psb->exch_busy) { - spin_lock_irqsave(&qp->abts_scsi_buf_list_lock, iflag); + if (psb->flags & LPFC_SBUF_XBUSY) { + spin_lock_irqsave(&qp->abts_io_buf_list_lock, iflag); psb->pCmd = NULL; - list_add_tail(&psb->list, &qp->lpfc_abts_scsi_buf_list); + list_add_tail(&psb->list, &qp->lpfc_abts_io_buf_list); qp->abts_scsi_io_bufs++; - spin_unlock_irqrestore(&qp->abts_scsi_buf_list_lock, iflag); + spin_unlock_irqrestore(&qp->abts_io_buf_list_lock, iflag); } else { lpfc_release_io_buf(phba, (struct lpfc_io_buf *)psb, qp); } @@ -918,9 +870,10 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); - return 1; + return 2; } /* @@ -1774,7 +1727,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, if (!sgpe || !sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9020 Invalid s/g entry: data=0x%p prot=0x%p\n", + "9020 Invalid s/g entry: data=x%px prot=x%px\n", sgpe, sgde); return 0; } @@ -1989,7 +1942,8 @@ out: **/ static int lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, - struct sli4_sge *sgl, int datasegcnt) + struct sli4_sge *sgl, int datasegcnt, + struct lpfc_io_buf *lpfc_cmd) { struct scatterlist *sgde = NULL; /* s/g data entry */ struct sli4_sge_diseed *diseed = NULL; @@ -2003,6 +1957,9 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, uint32_t checking = 1; uint32_t dma_len; uint32_t dma_offset = 0; + struct sli4_hybrid_sgl *sgl_xtra = NULL; + int j; + bool lsp_just_set = false; status = lpfc_sc_to_bg_opcodes(phba, sc, &txop, &rxop); if (status) @@ -2062,23 +2019,64 @@ lpfc_bg_setup_sgl(struct lpfc_hba *phba, struct scsi_cmnd *sc, sgl++; /* assumption: caller has already run dma_map_sg on command data */ - scsi_for_each_sg(sc, sgde, datasegcnt, i) { - physaddr = sg_dma_address(sgde); - dma_len = sg_dma_len(sgde); - sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); - sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); - if ((i + 1) == datasegcnt) - bf_set(lpfc_sli4_sge_last, sgl, 1); - else - bf_set(lpfc_sli4_sge_last, sgl, 0); - bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + sgde = scsi_sglist(sc); + j = 3; + for (i = 0; i < datasegcnt; i++) { + /* clear it */ + sgl->word2 = 0; - sgl->sge_len = cpu_to_le32(dma_len); - dma_offset += dma_len; + /* do we need to expand the segment */ + if (!lsp_just_set && !((j + 1) % phba->border_sge_num) && + ((datasegcnt - 1) != i)) { + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); + + if (unlikely(!sgl_xtra)) { + lpfc_cmd->seg_cnt = 0; + return 0; + } + sgl->addr_lo = cpu_to_le32(putPaddrLow( + sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + sgl_xtra->dma_phys_sgl)); + + } else { + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + } + + if (!(bf_get(lpfc_sli4_sge_type, sgl) & 
LPFC_SGE_TYPE_LSP)) { + if ((datasegcnt - 1) == i) + bf_set(lpfc_sli4_sge_last, sgl, 1); + physaddr = sg_dma_address(sgde); + dma_len = sg_dma_len(sgde); + sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); + + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(dma_len); + + dma_offset += dma_len; + sgde = sg_next(sgde); + + sgl++; + num_sge++; + lsp_just_set = false; + + } else { + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + i = i - 1; + + lsp_just_set = true; + } + + j++; - sgl++; - num_sge++; } out: @@ -2124,7 +2122,8 @@ out: **/ static int lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, - struct sli4_sge *sgl, int datacnt, int protcnt) + struct sli4_sge *sgl, int datacnt, int protcnt, + struct lpfc_io_buf *lpfc_cmd) { struct scatterlist *sgde = NULL; /* s/g data entry */ struct scatterlist *sgpe = NULL; /* s/g prot entry */ @@ -2146,14 +2145,15 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, #endif uint32_t checking = 1; uint32_t dma_offset = 0; - int num_sge = 0; + int num_sge = 0, j = 2; + struct sli4_hybrid_sgl *sgl_xtra = NULL; sgpe = scsi_prot_sglist(sc); sgde = scsi_sglist(sc); if (!sgpe || !sgde) { lpfc_printf_log(phba, KERN_ERR, LOG_FCP, - "9082 Invalid s/g entry: data=0x%p prot=0x%p\n", + "9082 Invalid s/g entry: data=x%px prot=x%px\n", sgpe, sgde); return 0; } @@ -2179,9 +2179,37 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, split_offset = 0; do { /* Check to see if we ran out of space */ - if (num_sge >= (phba->cfg_total_seg_cnt - 2)) + if ((num_sge >= (phba->cfg_total_seg_cnt - 2)) && + !(phba->cfg_xpsgl)) return num_sge + 3; + /* DISEED and DIF have to be together */ + if (!((j + 1) % phba->border_sge_num) || + !((j + 2) % phba->border_sge_num) || + !((j + 3) % phba->border_sge_num)) { + sgl->word2 = 0; + + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd); + + if (unlikely(!sgl_xtra)) { + goto out; + } else { + sgl->addr_lo = cpu_to_le32(putPaddrLow( + sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + sgl_xtra->dma_phys_sgl)); + } + + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + j = 0; + } + /* setup DISEED with what we have */ diseed = (struct sli4_sge_diseed *) sgl; memset(diseed, 0, sizeof(struct sli4_sge_diseed)); @@ -2228,7 +2256,9 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* advance sgl and increment bde count */ num_sge++; + sgl++; + j++; /* setup the first BDE that points to protection buffer */ protphysaddr = sg_dma_address(sgpe) + protgroup_offset; @@ -2243,6 +2273,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, sgl->addr_hi = le32_to_cpu(putPaddrHigh(protphysaddr)); sgl->addr_lo = le32_to_cpu(putPaddrLow(protphysaddr)); sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = 0; protgrp_blks = protgroup_len / 8; protgrp_bytes = protgrp_blks * blksize; @@ -2263,9 +2294,14 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, /* setup SGE's for data blocks associated with DIF data */ pgdone = 0; subtotal = 0; /* total bytes processed for current prot grp */ + + sgl++; + j++; + while (!pgdone) { /* Check to see if we ran 
out of space */ - if (num_sge >= phba->cfg_total_seg_cnt) + if ((num_sge >= phba->cfg_total_seg_cnt) && + !phba->cfg_xpsgl) return num_sge + 1; if (!sgde) { @@ -2274,60 +2310,101 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, __func__); return 0; } - sgl++; - dataphysaddr = sg_dma_address(sgde) + split_offset; - remainder = sg_dma_len(sgde) - split_offset; + if (!((j + 1) % phba->border_sge_num)) { + sgl->word2 = 0; - if ((subtotal + remainder) <= protgrp_bytes) { - /* we can use this whole buffer */ - dma_len = remainder; - split_offset = 0; + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_LSP); - if ((subtotal + remainder) == protgrp_bytes) - pgdone = 1; + sgl_xtra = lpfc_get_sgl_per_hdwq(phba, + lpfc_cmd); + + if (unlikely(!sgl_xtra)) { + goto out; + } else { + sgl->addr_lo = cpu_to_le32( + putPaddrLow(sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32( + putPaddrHigh(sgl_xtra->dma_phys_sgl)); + } + + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32( + phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; } else { - /* must split this buffer with next prot grp */ - dma_len = protgrp_bytes - subtotal; - split_offset += dma_len; - } + dataphysaddr = sg_dma_address(sgde) + + split_offset; - subtotal += dma_len; + remainder = sg_dma_len(sgde) - split_offset; - sgl->addr_lo = cpu_to_le32(putPaddrLow(dataphysaddr)); - sgl->addr_hi = cpu_to_le32(putPaddrHigh(dataphysaddr)); - bf_set(lpfc_sli4_sge_last, sgl, 0); - bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); + if ((subtotal + remainder) <= protgrp_bytes) { + /* we can use this whole buffer */ + dma_len = remainder; + split_offset = 0; - sgl->sge_len = cpu_to_le32(dma_len); - dma_offset += dma_len; + if ((subtotal + remainder) == + protgrp_bytes) + pgdone = 1; + } else { + /* must split this buffer with next + * prot grp + */ + dma_len = protgrp_bytes - subtotal; + split_offset += dma_len; + } - num_sge++; - curr_data++; + subtotal += dma_len; - if (split_offset) - break; + sgl->word2 = 0; + sgl->addr_lo = cpu_to_le32(putPaddrLow( + dataphysaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + dataphysaddr)); + bf_set(lpfc_sli4_sge_last, sgl, 0); + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); - sgl->sge_len = cpu_to_le32(dma_len); - dma_offset += dma_len; + sgl->sge_len = cpu_to_le32(dma_len); + dma_offset += dma_len; + + num_sge++; + curr_data++; + + if (split_offset) { + sgl++; + j++; + break; + } + + /* Move to the next s/g segment if possible */ + sgde = sg_next(sgde); + + sgl++; + } + + j++; } if (protgroup_offset) { /* update the reference tag */ reftag += protgrp_blks; - sgl++; continue; } /* are we done ? */ if (curr_prot == protcnt) { + /* mark the last SGL */ + sgl--; bf_set(lpfc_sli4_sge_last, sgl, 1); alldone = 1; } else if (curr_prot < protcnt) { /* advance to next prot buffer */ sgpe = sg_next(sgpe); - sgl++; /* update the reference tag */ reftag += protgrp_blks; @@ -2430,7 +2507,10 @@ lpfc_bg_scsi_adjust_dl(struct lpfc_hba *phba, * * This is the protection/DIF aware version of * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the - * two functions eventually, but for now, it's here. + * two functions eventually, but for now, it's here. + * RETURNS 0 - SUCCESS, + * 1 - Failed DMA map, retry. + * 2 - Invalid scsi cmd or prot-type. Do not retry.
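/*
 * The RETURNS note above is a tri-state convention: 0 submits, 1 is a
 * transient resource failure the midlayer may retry, 2 means the
 * command itself is bad and must be failed outright. A small sketch of
 * how a caller consumes it, loosely modeled on the queuecommand change
 * later in this patch (constants and names here are illustrative):
 */
#include <stdio.h>

enum prep_rc { PREP_OK = 0, PREP_RETRY = 1, PREP_FATAL = 2 };

static const char *dispatch(enum prep_rc rc)
{
	switch (rc) {
	case PREP_OK:
		return "submit WQE";
	case PREP_RETRY:
		return "host busy, requeue";	/* transient shortage */
	case PREP_FATAL:
	default:
		return "DID_ERROR, no retry";	/* malformed request */
	}
}

int main(void)
{
	printf("%s\n", dispatch(PREP_RETRY));
	printf("%s\n", dispatch(PREP_FATAL));
	return 0;
}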
**/ static int lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, @@ -2444,6 +2524,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; + int ret = 1; struct lpfc_vport *vport = phba->pport; /* @@ -2467,8 +2548,11 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, lpfc_cmd->seg_cnt = datasegcnt; /* First check if data segment count from SCSI Layer is good */ - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); + ret = 2; goto err; + } prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); @@ -2476,14 +2560,18 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, case LPFC_PG_TYPE_NO_DIF: /* Here we need to add a PDE5 and PDE6 to the count */ - if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) + if ((lpfc_cmd->seg_cnt + 2) > phba->cfg_total_seg_cnt) { + ret = 2; goto err; + } num_bde = lpfc_bg_setup_bpl(phba, scsi_cmnd, bpl, datasegcnt); /* we should have 2 or more entries in buffer list */ - if (num_bde < 2) + if (num_bde < 2) { + ret = 2; goto err; + } break; case LPFC_PG_TYPE_DIF_BUF: @@ -2507,15 +2595,19 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, * protection data segment. */ if ((lpfc_cmd->prot_seg_cnt * 4) > - (phba->cfg_total_seg_cnt - 2)) + (phba->cfg_total_seg_cnt - 2)) { + ret = 2; goto err; + } num_bde = lpfc_bg_setup_bpl_prot(phba, scsi_cmnd, bpl, datasegcnt, protsegcnt); /* we should have 3 or more entries in buffer list */ if ((num_bde < 3) || - (num_bde > phba->cfg_total_seg_cnt)) + (num_bde > phba->cfg_total_seg_cnt)) { + ret = 2; goto err; + } break; case LPFC_PG_TYPE_INVALID: @@ -2526,7 +2618,7 @@ lpfc_bg_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9022 Unexpected protection group %i\n", prot_group_type); - return 1; + return 2; } } @@ -2576,7 +2668,7 @@ err: lpfc_cmd->seg_cnt = 0; lpfc_cmd->prot_seg_cnt = 0; - return 1; + return ret; } /* @@ -2809,26 +2901,6 @@ lpfc_parse_bg_err(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd, uint32_t bgstat = bgf->bgstat; uint64_t failing_sector = 0; - spin_lock(&_dump_buf_lock); - if (!_dump_buf_done) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9070 BLKGRD: Saving" - " Data for %u blocks to debugfs\n", - (cmd->cmnd[7] << 8 | cmd->cmnd[8])); - lpfc_debug_save_data(phba, cmd); - - /* If we have a prot sgl, save the DIF buffer */ - if (lpfc_prot_group_type(phba, cmd) == - LPFC_PG_TYPE_DIF_BUF) { - lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9071 BLKGRD: " - "Saving DIF for %u blocks to debugfs\n", - (cmd->cmnd[7] << 8 | cmd->cmnd[8])); - lpfc_debug_save_dif(phba, cmd); - } - - _dump_buf_done = 1; - } - spin_unlock(&_dump_buf_lock); - if (lpfc_bgs_get_invalid_prof(bgstat)) { cmd->result = DID_ERROR << 16; lpfc_printf_log(phba, KERN_WARNING, LOG_FCP | LOG_BG, @@ -2962,7 +3034,8 @@ out: * field of @lpfc_cmd for device with SLI-4 interface spec. 
* * Return codes: - * 1 - Error + * 2 - Error - Do not retry + * 1 - Error - Retry * 0 - Success **/ static int @@ -2978,8 +3051,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) uint32_t num_bde = 0; uint32_t dma_len; uint32_t dma_offset = 0; - int nseg; + int nseg, i, j; struct ulp_bde64 *bde; + bool lsp_just_set = false; + struct sli4_hybrid_sgl *sgl_xtra = NULL; /* * There are three possibilities here - use scatter-gather segment, use @@ -3006,15 +3081,17 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) sgl += 1; first_data_sgl = sgl; lpfc_cmd->seg_cnt = nseg; - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { + if (!phba->cfg_xpsgl && + lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9074 BLKGRD:" " %s: Too many sg segments from " "dma_map_sg. Config %d, seg_cnt %d\n", __func__, phba->cfg_sg_seg_cnt, lpfc_cmd->seg_cnt); + WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); lpfc_cmd->seg_cnt = 0; scsi_dma_unmap(scsi_cmnd); - return 1; + return 2; } /* @@ -3026,22 +3103,80 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) * the IOCB. If it can't then the BDEs get added to a BPL as it * does for SLI-2 mode. */ - scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) { - physaddr = sg_dma_address(sgel); - dma_len = sg_dma_len(sgel); - sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr)); - sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr)); - sgl->word2 = le32_to_cpu(sgl->word2); - if ((num_bde + 1) == nseg) + + /* for tracking segment boundaries */ + sgel = scsi_sglist(scsi_cmnd); + j = 2; + for (i = 0; i < nseg; i++) { + sgl->word2 = 0; + if ((num_bde + 1) == nseg) { bf_set(lpfc_sli4_sge_last, sgl, 1); - else + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + } else { bf_set(lpfc_sli4_sge_last, sgl, 0); - bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); - bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); - sgl->word2 = cpu_to_le32(sgl->word2); - sgl->sge_len = cpu_to_le32(dma_len); - dma_offset += dma_len; - sgl++; + + /* do we need to expand the segment */ + if (!lsp_just_set && + !((j + 1) % phba->border_sge_num) && + ((nseg - 1) != i)) { + /* set LSP type */ + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_LSP); + + sgl_xtra = lpfc_get_sgl_per_hdwq( + phba, lpfc_cmd); + + if (unlikely(!sgl_xtra)) { + lpfc_cmd->seg_cnt = 0; + scsi_dma_unmap(scsi_cmnd); + return 1; + } + sgl->addr_lo = cpu_to_le32(putPaddrLow( + sgl_xtra->dma_phys_sgl)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + sgl_xtra->dma_phys_sgl)); + + } else { + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); + } + } + + if (!(bf_get(lpfc_sli4_sge_type, sgl) & + LPFC_SGE_TYPE_LSP)) { + if ((nseg - 1) == i) + bf_set(lpfc_sli4_sge_last, sgl, 1); + + physaddr = sg_dma_address(sgel); + dma_len = sg_dma_len(sgel); + sgl->addr_lo = cpu_to_le32(putPaddrLow( + physaddr)); + sgl->addr_hi = cpu_to_le32(putPaddrHigh( + physaddr)); + + bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32(dma_len); + + dma_offset += dma_len; + sgel = sg_next(sgel); + + sgl++; + lsp_just_set = false; + + } else { + sgl->word2 = cpu_to_le32(sgl->word2); + sgl->sge_len = cpu_to_le32( + phba->cfg_sg_dma_buf_size); + + sgl = (struct sli4_sge *)sgl_xtra->dma_sgl; + i = i - 1; + + lsp_just_set = true; + } + + j++; } /* * Setup the first Payload BDE. 
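/*
 * The rewritten mapping loop above implements "extended" SGLs: when
 * the next entry would take the last slot of the current SGL page
 * (tracked by j against border_sge_num) and data remains, it writes a
 * link entry (LPFC_SGE_TYPE_LSP) pointing at a freshly allocated page,
 * then replays the same scatterlist element there (i = i - 1). The
 * driver additionally guards with lsp_just_set so two links never run
 * back to back. A compilable toy model of the walk; PER_PAGE and all
 * names are invented:
 */
#include <stdio.h>

#define PER_PAGE 4	/* toy entries per SGL page */

struct toy_sge { int is_link; int seg; };

static int layout(struct toy_sge *out, int nseg)
{
	int i, j = 0, n = 0;

	for (i = 0; i < nseg; i++) {
		if (((j + 1) % PER_PAGE) == 0 && i != nseg - 1) {
			out[n].is_link = 1;	/* LSP: chain a new page */
			out[n++].seg = -1;
			j++;
			i--;	/* replay this element on the new page */
			continue;
		}
		out[n].is_link = 0;	/* ordinary data SGE */
		out[n++].seg = i;
		j++;
	}
	return n;
}

int main(void)
{
	struct toy_sge v[32];
	int n = layout(v, 10), k;

	for (k = 0; k < n; k++)
		printf("%s ", v[k].is_link ? "LSP" : "D");
	putchar('\n');	/* D D D LSP D D D LSP D D D D */
	return 0;
}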
For FCoE we just key off @@ -3110,6 +3245,10 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd) * This is the protection/DIF aware version of * lpfc_scsi_prep_dma_buf(). It may be a good idea to combine the * two functions eventually, but for now, it's here + * Return codes: + * 2 - Error - Do not retry + * 1 - Error - Retry + * 0 - Success **/ static int lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, @@ -3123,6 +3262,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, int datasegcnt, protsegcnt, datadir = scsi_cmnd->sc_data_direction; int prot_group_type = 0; int fcpdl; + int ret = 1; struct lpfc_vport *vport = phba->pport; /* @@ -3152,23 +3292,33 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, lpfc_cmd->seg_cnt = datasegcnt; /* First check if data segment count from SCSI Layer is good */ - if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) + if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt && + !phba->cfg_xpsgl) { + WARN_ON_ONCE(lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt); + ret = 2; goto err; + } prot_group_type = lpfc_prot_group_type(phba, scsi_cmnd); switch (prot_group_type) { case LPFC_PG_TYPE_NO_DIF: /* Here we need to add a DISEED to the count */ - if ((lpfc_cmd->seg_cnt + 1) > phba->cfg_total_seg_cnt) + if (((lpfc_cmd->seg_cnt + 1) > + phba->cfg_total_seg_cnt) && + !phba->cfg_xpsgl) { + ret = 2; goto err; + } num_sge = lpfc_bg_setup_sgl(phba, scsi_cmnd, sgl, - datasegcnt); + datasegcnt, lpfc_cmd); /* we should have 2 or more entries in buffer list */ - if (num_sge < 2) + if (num_sge < 2) { + ret = 2; goto err; + } break; case LPFC_PG_TYPE_DIF_BUF: @@ -3190,17 +3340,23 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, * There is a minimun of 3 SGEs used for every * protection data segment. */ - if ((lpfc_cmd->prot_seg_cnt * 3) > - (phba->cfg_total_seg_cnt - 2)) + if (((lpfc_cmd->prot_seg_cnt * 3) > + (phba->cfg_total_seg_cnt - 2)) && + !phba->cfg_xpsgl) { + ret = 2; goto err; + } num_sge = lpfc_bg_setup_sgl_prot(phba, scsi_cmnd, sgl, - datasegcnt, protsegcnt); + datasegcnt, protsegcnt, lpfc_cmd); /* we should have 3 or more entries in buffer list */ - if ((num_sge < 3) || - (num_sge > phba->cfg_total_seg_cnt)) + if (num_sge < 3 || + (num_sge > phba->cfg_total_seg_cnt && + !phba->cfg_xpsgl)) { + ret = 2; goto err; + } break; case LPFC_PG_TYPE_INVALID: @@ -3211,7 +3367,7 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, lpfc_printf_log(phba, KERN_ERR, LOG_FCP, "9083 Unexpected protection group %i\n", prot_group_type); - return 1; + return 2; } } @@ -3273,7 +3429,7 @@ err: lpfc_cmd->seg_cnt = 0; lpfc_cmd->prot_seg_cnt = 0; - return 1; + return ret; } /** @@ -3656,7 +3812,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, /* Sanity check on return of outstanding command */ cmd = lpfc_cmd->pCmd; - if (!cmd) { + if (!cmd || !phba) { lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, "2621 IO completion: Not an active IO\n"); spin_unlock(&lpfc_cmd->buf_lock); @@ -3668,7 +3824,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, phba->sli4_hba.hdwq[idx].scsi_cstat.io_cmpls++; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) { + if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) { cpu = raw_smp_processor_id(); if (cpu < LPFC_CHECK_CPU_CNT && phba->sli4_hba.hdwq) phba->sli4_hba.hdwq[idx].cpucheck_cmpl_io[cpu]++; @@ -3679,7 +3835,10 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->result = (pIocbOut->iocb.un.ulpWord[4] & 
IOERR_PARAM_MASK); lpfc_cmd->status = pIocbOut->iocb.ulpStatus; /* pick up SLI4 exhange busy status from HBA */ - lpfc_cmd->exch_busy = pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY; + if (pIocbOut->iocb_flag & LPFC_EXCHANGE_BUSY) + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; #ifdef CONFIG_SCSI_LPFC_DEBUG_FS if (lpfc_cmd->prot_data_type) { @@ -3713,7 +3872,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, } #endif - if (lpfc_cmd->status) { + if (unlikely(lpfc_cmd->status)) { if (lpfc_cmd->status == IOSTAT_LOCAL_REJECT && (lpfc_cmd->result & IOERR_DRVR_MASK)) lpfc_cmd->status = IOSTAT_DRIVER_REJECT; @@ -3839,14 +3998,14 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, uint32_t *lp = (uint32_t *)cmd->sense_buffer; lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, - "0710 Iodone <%d/%llu> cmd %p, error " + "0710 Iodone <%d/%llu> cmd x%px, error " "x%x SNS x%x x%x Data: x%x x%x\n", cmd->device->id, cmd->device->lun, cmd, cmd->result, *lp, *(lp + 3), cmd->retries, scsi_get_resid(cmd)); } - lpfc_update_stats(phba, lpfc_cmd); + lpfc_update_stats(vport, lpfc_cmd); if (vport->cfg_max_scsicmpl_time && time_after(jiffies, lpfc_cmd->start_time + msecs_to_jiffies(vport->cfg_max_scsicmpl_time))) { @@ -4454,13 +4613,18 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd); } - if (err) + if (unlikely(err)) { + if (err == 2) { + cmnd->result = DID_ERROR << 16; + goto out_fail_command_release_buf; + } goto out_host_busy_free_buf; + } lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp); #ifdef CONFIG_SCSI_LPFC_DEBUG_FS - if (phba->cpucheck_on & LPFC_CHECK_SCSI_IO) { + if (unlikely(phba->cpucheck_on & LPFC_CHECK_SCSI_IO)) { cpu = raw_smp_processor_id(); if (cpu < LPFC_CHECK_CPU_CNT) { struct lpfc_sli4_hdw_queue *hdwq = @@ -4526,6 +4690,9 @@ lpfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd) out_tgt_busy: return SCSI_MLQUEUE_TARGET_BUSY; + out_fail_command_release_buf: + lpfc_release_scsi_buf(phba, lpfc_cmd); + out_fail_command: cmnd->scsi_done(cmnd); return 0; @@ -4568,7 +4735,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) spin_lock_irqsave(&phba->hbalock, flags); /* driver queued commands are in process of being flushed */ - if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { + if (phba->hba_flag & HBA_IOQ_FLUSH) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, "3168 SCSI Layer abort requested I/O has been " "flushed by LLD.\n"); @@ -4589,7 +4756,7 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) iocb = &lpfc_cmd->cur_iocbq; if (phba->sli_rev == LPFC_SLI_REV4) { - pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].fcp_wq->pring; + pring_s4 = phba->sli4_hba.hdwq[iocb->hba_wqidx].io_wq->pring; if (!pring_s4) { ret = FAILED; goto out_unlock_buf; @@ -4680,20 +4847,21 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) ret_val = __lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0); } - /* no longer need the lock after this point */ - spin_unlock_irqrestore(&phba->hbalock, flags); if (ret_val == IOCB_ERROR) { /* Indicate the IO is not being aborted by the driver. 
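/*
 * The reordering above keeps the host lock held while the error path
 * clears LPFC_DRIVER_ABORTED, rather than dropping it before the
 * IOCB_ERROR check: state guarded by a lock must finish changing
 * before the lock goes away. A toy version with a pthread mutex
 * (names invented):
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t toy_hbalock = PTHREAD_MUTEX_INITIALIZER;
static bool driver_aborted;

static int issue_abort(bool fails)
{
	int ret = 0;

	pthread_mutex_lock(&toy_hbalock);
	driver_aborted = true;		/* protected state */

	if (fails) {
		driver_aborted = false;	/* roll back UNDER the lock */
		ret = -1;
	}
	pthread_mutex_unlock(&toy_hbalock);	/* only now release */
	return ret;
}

int main(void)
{
	return issue_abort(false);
}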
*/ iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED; lpfc_cmd->waitq = NULL; spin_unlock(&lpfc_cmd->buf_lock); + spin_unlock_irqrestore(&phba->hbalock, flags); lpfc_sli_release_iocbq(phba, abtsiocb); ret = FAILED; goto out; } + /* no longer need the lock after this point */ spin_unlock(&lpfc_cmd->buf_lock); + spin_unlock_irqrestore(&phba->hbalock, flags); if (phba->cfg_poll & DISABLE_FCP_RING_INT) lpfc_sli_handle_fast_ring_event(phba, @@ -4956,7 +5124,7 @@ lpfc_chk_tgt_mapped(struct lpfc_vport *vport, struct scsi_cmnd *cmnd) rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata) { lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, - "0797 Tgt Map rport failure: rdata x%p\n", rdata); + "0797 Tgt Map rport failure: rdata x%px\n", rdata); return FAILED; } pnode = rdata->pnode; @@ -5054,7 +5222,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0798 Device Reset rdata failure: rdata x%p\n", + "0798 Device Reset rdata failure: rdata x%px\n", rdata); return FAILED; } @@ -5066,7 +5234,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_chk_tgt_mapped(vport, cmnd); if (status == FAILED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0721 Device Reset rport failure: rdata x%p\n", rdata); + "0721 Device Reset rport failure: rdata x%px\n", rdata); return FAILED; } @@ -5125,7 +5293,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) rdata = lpfc_rport_data_from_scsi_device(cmnd->device); if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0799 Target Reset rdata failure: rdata x%p\n", + "0799 Target Reset rdata failure: rdata x%px\n", rdata); return FAILED; } @@ -5137,7 +5305,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) status = lpfc_chk_tgt_mapped(vport, cmnd); if (status == FAILED) { lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "0722 Target Reset rport failure: rdata x%p\n", rdata); + "0722 Target Reset rport failure: rdata x%px\n", rdata); if (pnode) { spin_lock_irq(shost->host_lock); pnode->nlp_flag &= ~NLP_NPR_ADISC; @@ -5295,18 +5463,20 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd) lpfc_offline(phba); rc = lpfc_sli_brdrestart(phba); if (rc) - ret = FAILED; + goto error; + rc = lpfc_online(phba); if (rc) - ret = FAILED; + goto error; + lpfc_unblock_mgmt_io(phba); - if (ret == FAILED) { - lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, - "3323 Failed host reset, bring it offline\n"); - lpfc_sli4_offline_eratt(phba); - } return ret; +error: + lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP, + "3323 Failed host reset\n"); + lpfc_unblock_mgmt_io(phba); + return FAILED; } /** @@ -5870,7 +6040,7 @@ struct scsi_host_template lpfc_template_no_hr = { .sg_tablesize = LPFC_DEFAULT_SG_SEG_CNT, .cmd_per_lun = LPFC_CMD_PER_LUN, .shost_attrs = lpfc_hba_attrs, - .max_sectors = 0xFFFF, + .max_sectors = 0xFFFFFFFF, .vendor_id = LPFC_NL_VENDOR_ID, .change_queue_depth = scsi_change_queue_depth, .track_queue_depth = 1, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index f9e6a135d656..64002b0cb02d 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -87,6 +87,10 @@ static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe); static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba); static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba); +static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q); +static void 
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, + struct lpfc_queue *cq, + struct lpfc_cqe *cqe); static IOCB_t * lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq) @@ -467,25 +471,52 @@ __lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq, } static void -lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) +lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq) { - struct lpfc_eqe *eqe; - uint32_t count = 0; + struct lpfc_eqe *eqe = NULL; + u32 eq_count = 0, cq_count = 0; + struct lpfc_cqe *cqe = NULL; + struct lpfc_queue *cq = NULL, *childq = NULL; + int cqid = 0; /* walk all the EQ entries and drop on the floor */ eqe = lpfc_sli4_eq_get(eq); while (eqe) { + /* Get the reference to the corresponding CQ */ + cqid = bf_get_le32(lpfc_eqe_resource_id, eqe); + cq = NULL; + + list_for_each_entry(childq, &eq->child_list, list) { + if (childq->queue_id == cqid) { + cq = childq; + break; + } + } + /* If CQ is valid, iterate through it and drop all the CQEs */ + if (cq) { + cqe = lpfc_sli4_cq_get(cq); + while (cqe) { + __lpfc_sli4_consume_cqe(phba, cq, cqe); + cq_count++; + cqe = lpfc_sli4_cq_get(cq); + } + /* Clear and re-arm the CQ */ + phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count, + LPFC_QUEUE_REARM); + cq_count = 0; + } __lpfc_sli4_consume_eqe(phba, eq, eqe); - count++; + eq_count++; eqe = lpfc_sli4_eq_get(eq); } /* Clear and re-arm the EQ */ - phba->sli4_hba.sli4_write_eq_db(phba, eq, count, LPFC_QUEUE_REARM); + phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM); } static int -lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq) +lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq, + uint8_t rearm) { struct lpfc_eqe *eqe; int count = 0, consumed = 0; @@ -519,8 +550,8 @@ lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq) eq->queue_claimed = 0; rearm_and_exit: - /* Always clear and re-arm the EQ */ - phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_REARM); + /* Always clear the EQ. 
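/*
 * lpfc_sli4_eqcq_flush() above no longer drops only event queue
 * entries: each EQE names a child completion queue by id, and that CQ
 * is drained and re-armed before the EQE itself is consumed. A toy
 * model of the parent/child walk, with invented types:
 */
#include <stdio.h>

struct toy_cq { int id; int pending; };

/* Drain the CQ each event points at, then consume the event. */
static void flush_events(const int *eqe_cqid, int nev,
			 struct toy_cq *cqs, int ncq)
{
	int i, j;

	for (i = 0; i < nev; i++) {
		for (j = 0; j < ncq; j++) {
			if (cqs[j].id == eqe_cqid[i]) {
				printf("cq %d: dropped %d cqes\n",
				       cqs[j].id, cqs[j].pending);
				cqs[j].pending = 0;	/* drain + re-arm */
				break;
			}
		}
		/* consume the EQE here; re-arm the EQ after the loop */
	}
}

int main(void)
{
	struct toy_cq cqs[] = { { 5, 3 }, { 7, 1 } };
	int events[] = { 7, 5 };

	flush_events(events, 2, cqs, 2);
	return 0;
}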
*/ + phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm); return count; } @@ -1391,9 +1422,12 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist, while (!list_empty(iocblist)) { list_remove_head(iocblist, piocb, struct lpfc_iocbq, list); - if (!piocb->iocb_cmpl) - lpfc_sli_release_iocbq(phba, piocb); - else { + if (!piocb->iocb_cmpl) { + if (piocb->iocb_flag & LPFC_IO_NVME) + lpfc_nvme_cancel_iocb(phba, piocb); + else + lpfc_sli_release_iocbq(phba, piocb); + } else { piocb->iocb.ulpStatus = ulpstatus; piocb->iocb.un.ulpWord[4] = ulpWord4; (piocb->iocb_cmpl) (phba, piocb, piocb); @@ -2426,6 +2460,20 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) return; } +static void +__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) +{ + unsigned long iflags; + + if (ndlp->nlp_flag & NLP_RELEASE_RPI) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + spin_lock_irqsave(&vport->phba->ndlp_lock, iflags); + ndlp->nlp_flag &= ~NLP_RELEASE_RPI; + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + spin_unlock_irqrestore(&vport->phba->ndlp_lock, iflags); + } + ndlp->nlp_flag &= ~NLP_UNREG_INP; +} /** * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler @@ -2497,7 +2545,7 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_DISCOVERY, "1438 UNREG cmpl deferred mbox x%x " - "on NPort x%x Data: x%x x%x %p\n", + "on NPort x%x Data: x%x x%x %px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp); @@ -2507,8 +2555,10 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + __lpfc_sli_rpi_release(vport, ndlp); } + if (vport->load_flag & FC_UNLOADING) + lpfc_nlp_put(ndlp); pmb->ctx_ndlp = NULL; } } @@ -2555,7 +2605,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_SLI, "0010 UNREG_LOGIN vpi:%x " "rpi:%x DID:%x defer x%x flg x%x " - "map:%x %p\n", + "map:%x %px\n", vport->vpi, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, @@ -2573,7 +2623,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_DISCOVERY, "4111 UNREG cmpl deferred " "clr x%x on " - "NPort x%x Data: x%x %p\n", + "NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); ndlp->nlp_flag &= ~NLP_UNREG_INP; @@ -2582,7 +2632,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_issue_els_plogi( vport, ndlp->nlp_DID, 0); } else { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + __lpfc_sli_rpi_release(vport, ndlp); } } } @@ -2655,7 +2705,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):0323 Unknown Mailbox command " "x%x (x%x/x%x) Cmpl\n", - pmb->vport ? pmb->vport->vpi : 0, + pmb->vport ? pmb->vport->vpi : + LPFC_VPORT_UNKNOWN, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2676,7 +2727,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) "(%d):0305 Mbox cmd cmpl " "error - RETRYing Data: x%x " "(x%x/x%x) x%x x%x x%x\n", - pmb->vport ? pmb->vport->vpi : 0, + pmb->vport ? 
pmb->vport->vpi : + LPFC_VPORT_UNKNOWN, pmbox->mbxCommand, lpfc_sli_config_mbox_subsys_get(phba, pmb), @@ -2684,7 +2736,8 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) pmb), pmbox->mbxStatus, pmbox->un.varWords[0], - pmb->vport->port_state); + pmb->vport ? pmb->vport->port_state : + LPFC_VPORT_UNKNOWN); pmbox->mbxStatus = 0; pmbox->mbxOwner = OWN_HOST; rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); @@ -2695,7 +2748,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Mailbox cmd <cmd> Cmpl <cmpl> */ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI, - "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p " + "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps " "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x " "x%x x%x x%x\n", pmb->vport ? pmb->vport->vpi : 0, @@ -3961,7 +4014,7 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_hdw_queue; i++) { - pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; + pring = phba->sli4_hba.hdwq[i].io_wq->pring; lpfc_sli_abort_iocb_ring(phba, pring); } } else { @@ -3971,17 +4024,17 @@ lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba) } /** - * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring + * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring * @phba: Pointer to HBA context object. * - * This function flushes all iocbs in the fcp ring and frees all the iocb + * This function flushes all iocbs in the IO ring and frees all the iocb * objects in txq and txcmplq. This function will not issue abort iocbs * for all the iocb commands in txcmplq, they will just be returned with * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI * slot has been permanently disabled. **/ void -lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) +lpfc_sli_flush_io_rings(struct lpfc_hba *phba) { LIST_HEAD(txq); LIST_HEAD(txcmplq); @@ -3992,13 +4045,13 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); /* Indicate the I/O queues are flushed */ - phba->hba_flag |= HBA_FCP_IOQ_FLUSH; + phba->hba_flag |= HBA_IOQ_FLUSH; spin_unlock_irq(&phba->hbalock); /* Look on all the FCP Rings for the iotag */ if (phba->sli_rev >= LPFC_SLI_REV4) { for (i = 0; i < phba->cfg_hdw_queue; i++) { - pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; + pring = phba->sli4_hba.hdwq[i].io_wq->pring; spin_lock_irq(&pring->ring_lock); /* Retrieve everything on txq */ @@ -4046,56 +4099,6 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba) } /** - * lpfc_sli_flush_nvme_rings - flush all wqes in the nvme rings - * @phba: Pointer to HBA context object. - * - * This function flushes all wqes in the nvme rings and frees all resources - * in the txcmplq. This function does not issue abort wqes for the IO - * commands in txcmplq, they will just be returned with - * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI - * slot has been permanently disabled. - **/ -void -lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba) -{ - LIST_HEAD(txcmplq); - struct lpfc_sli_ring *pring; - uint32_t i; - struct lpfc_iocbq *piocb, *next_iocb; - - if ((phba->sli_rev < LPFC_SLI_REV4) || - !(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) - return; - - /* Hint to other driver operations that a flush is in progress. */ - spin_lock_irq(&phba->hbalock); - phba->hba_flag |= HBA_NVME_IOQ_FLUSH; - spin_unlock_irq(&phba->hbalock); - - /* Cycle through all NVME rings and complete each IO with - * a local driver reason code. This is a flush so no - * abort exchange to FW. 
- */ - for (i = 0; i < phba->cfg_hdw_queue; i++) { - pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; - - spin_lock_irq(&pring->ring_lock); - list_for_each_entry_safe(piocb, next_iocb, - &pring->txcmplq, list) - piocb->iocb_flag &= ~LPFC_IO_ON_TXCMPLQ; - /* Retrieve everything on the txcmplq */ - list_splice_init(&pring->txcmplq, &txcmplq); - pring->txcmplq_cnt = 0; - spin_unlock_irq(&pring->ring_lock); - - /* Flush the txcmpq &&&PAE */ - lpfc_sli_cancel_iocbs(phba, &txcmplq, - IOSTAT_LOCAL_REJECT, - IOERR_SLI_DOWN); - } -} - -/** * lpfc_sli_brdready_s3 - Check for sli3 host ready status * @phba: Pointer to HBA context object. * @mask: Bit mask to be checked. @@ -4495,7 +4498,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba) * checking during resets the device. The caller is not required to hold * any locks. * - * This function returns 0 always. + * This function returns 0 on success else returns negative error code. **/ int lpfc_sli4_brdreset(struct lpfc_hba *phba) @@ -4652,8 +4655,10 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) hba_aer_enabled = phba->hba_flag & HBA_AER_ENABLED; rc = lpfc_sli4_brdreset(phba); - if (rc) - return rc; + if (rc) { + phba->link_state = LPFC_HBA_ERROR; + goto hba_down_queue; + } spin_lock_irq(&phba->hbalock); phba->pport->stopped = 0; @@ -4668,6 +4673,7 @@ lpfc_sli_brdrestart_s4(struct lpfc_hba *phba) if (hba_aer_enabled) pci_disable_pcie_error_reporting(phba->pcidev); +hba_down_queue: lpfc_hba_down_post(phba); lpfc_sli4_queue_destroy(phba); @@ -4912,8 +4918,17 @@ static int lpfc_sli4_rb_setup(struct lpfc_hba *phba) { phba->hbq_in_use = 1; - phba->hbqs[LPFC_ELS_HBQ].entry_count = - lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; + /** + * Specific case when the MDS diagnostics is enabled and supported. + * The receive buffer count is truncated to manage the incoming + * traffic. 
+ **/ + if (phba->cfg_enable_mds_diags && phba->mds_diags_support) + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1; + else + phba->hbqs[LPFC_ELS_HBQ].entry_count = + lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count; phba->hbq_count = 1; lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ); /* Initially populate or replenish the HBQs */ @@ -5584,10 +5599,8 @@ lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba) for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) { qp = &sli4_hba->hdwq[qidx]; /* ARM the corresponding CQ */ - sli4_hba->sli4_write_cq_db(phba, qp->fcp_cq, 0, - LPFC_QUEUE_REARM); - sli4_hba->sli4_write_cq_db(phba, qp->nvme_cq, 0, - LPFC_QUEUE_REARM); + sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0, + LPFC_QUEUE_REARM); } /* Loop thru all IRQ vectors */ @@ -6199,6 +6212,14 @@ lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox, mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS; mbox->u.mqe.un.set_feature.param_len = 8; break; + case LPFC_SET_DUAL_DUMP: + bf_set(lpfc_mbx_set_feature_dd, + &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP); + bf_set(lpfc_mbx_set_feature_ddquery, + &mbox->u.mqe.un.set_feature, 0); + mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP; + mbox->u.mqe.un.set_feature.param_len = 4; + break; } return; @@ -6216,11 +6237,16 @@ lpfc_ras_stop_fwlog(struct lpfc_hba *phba) { struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog; - ras_fwlog->ras_active = false; + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); /* Disable FW logging to host memory */ writel(LPFC_CTL_PDEV_CTL_DDL_RAS, phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET); + + /* Wait 10ms for firmware to stop using DMA buffer */ + usleep_range(10 * 1000, 20 * 1000); } /** @@ -6256,7 +6282,9 @@ lpfc_sli4_ras_dma_free(struct lpfc_hba *phba) ras_fwlog->lwpd.virt = NULL; } - ras_fwlog->ras_active = false; + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); } /** @@ -6358,7 +6386,9 @@ lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) goto disable_ras; } - ras_fwlog->ras_active = true; + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = ACTIVE; + spin_unlock_irq(&phba->hbalock); mempool_free(pmb, phba->mbox_mem_pool); return; @@ -6390,6 +6420,10 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, uint32_t len = 0, fwlog_buffsize, fwlog_entry_count; int rc = 0; + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = INACTIVE; + spin_unlock_irq(&phba->hbalock); + fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE * phba->cfg_ras_fwlog_buffsize); fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE); @@ -6449,6 +6483,9 @@ lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba, mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys); mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys); + spin_lock_irq(&phba->hbalock); + ras_fwlog->state = REG_INPROGRESS; + spin_unlock_irq(&phba->hbalock); mbox->vport = phba->pport; mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl; @@ -7180,7 +7217,7 @@ lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq, int lpfc_sli4_hba_setup(struct lpfc_hba *phba) { - int rc, i, cnt, len; + int rc, i, cnt, len, dd; LPFC_MBOXQ_t *mboxq; struct lpfc_mqe *mqe; uint8_t *vpd; @@ -7243,7 +7280,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) else phba->hba_flag &= ~HBA_FIP_SUPPORT; - phba->hba_flag &= ~HBA_FCP_IOQ_FLUSH; + phba->hba_flag &= ~HBA_IOQ_FLUSH; if (phba->sli_rev != LPFC_SLI_REV4) { 
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, @@ -7431,6 +7468,23 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED); spin_unlock_irq(&phba->hbalock); + /* Always try to enable dual dump feature if we can */ + lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature); + if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP)) + lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_INIT, + "6448 Dual Dump is enabled\n"); + else + lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT, + "6447 Dual Dump Mailbox x%x (x%x/x%x) failed, " + "rc:x%x dd:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get( + phba, mboxq), + lpfc_sli_config_mbox_opcode_get( + phba, mboxq), + rc, dd); /* * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent * calls depends on these resources to complete port setup. @@ -7555,9 +7609,11 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) } phba->sli4_hba.nvmet_xri_cnt = rc; - cnt = phba->cfg_iocb_cnt * 1024; - /* We need 1 iocbq for every SGL, for IO processing */ - cnt += phba->sli4_hba.nvmet_xri_cnt; + /* We allocate an iocbq for every receive context SGL. + * The additional allocation is for abort and ls handling. + */ + cnt = phba->sli4_hba.nvmet_xri_cnt + + phba->sli4_hba.max_cfg_param.max_xri; } else { /* update host common xri-sgl sizes and mappings */ rc = lpfc_sli4_io_sgl_update(phba); @@ -7579,14 +7635,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_destroy_queue; } - cnt = phba->cfg_iocb_cnt * 1024; + /* Each lpfc_io_buf job structure has an iocbq element. + * This cnt provides for abort, els, ct and ls requests. 
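/*
 * The dual-dump hunk above follows the driver's synchronous set-feature
 * idiom: build the request, issue it with MBX_POLL, then read the echoed
 * dd bit back out of the same MQE to learn whether firmware accepted the
 * feature. A condensed sketch; the helper name and the -EIO mapping are
 * assumptions, and mboxq is presumed to come from phba->mbox_mem_pool as
 * elsewhere in this setup path:
 */
static int lpfc_try_enable_dual_dump(struct lpfc_hba *phba,
				     LPFC_MBOXQ_t *mboxq)
{
	int rc, dd;

	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	/* The response overlays the request MQE, so the dd bit is read
	 * back from the very structure that was just submitted.
	 */
	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
	return (rc == MBX_SUCCESS && dd == LPFC_ENABLE_DUAL_DUMP) ? 0 : -EIO;
}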
+ */ + cnt = phba->sli4_hba.max_cfg_param.max_xri; } if (!phba->sli.iocbq_lookup) { /* Initialize and populate the iocb list per host */ lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "2821 initialize iocb list %d total %d\n", - phba->cfg_iocb_cnt, cnt); + "2821 initialize iocb list with %d entries\n", + cnt); rc = lpfc_init_iocb_list(phba, cnt); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, @@ -7898,7 +7957,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) if (sli4_hba->hdwq) { for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) { eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq; - if (eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { + if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) { fpeq = eq; break; } @@ -7924,7 +7983,7 @@ lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba) if (mbox_pending) /* process and rearm the EQ */ - lpfc_sli4_process_eq(phba, fpeq); + lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); else /* Always clear and re-arm the EQ */ sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM); @@ -7972,7 +8031,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba) /* Mbox cmd <mbxCommand> timeout */ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "0310 Mailbox command x%x timeout Data: x%x x%x x%p\n", + "0310 Mailbox command x%x timeout Data: x%x x%x x%px\n", mb->mbxCommand, phba->pport->port_state, phba->sli.sli_flag, @@ -8505,7 +8564,7 @@ lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba) psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); - /* wake up worker thread to post asynchronlous mailbox command */ + /* wake up worker thread to post asynchronous mailbox command */ lpfc_worker_wake_up(phba); } @@ -8773,7 +8832,7 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq, return rc; } - /* Now, interrupt mode asynchrous mailbox command */ + /* Now, interrupt mode asynchronous mailbox command */ rc = lpfc_mbox_cmd_check(phba, mboxq); if (rc) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, @@ -8996,7 +9055,8 @@ lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp) * @pring: Pointer to driver SLI ring object. * @piocb: Pointer to address of newly added command iocb. * - * This function is called with hbalock held to add a command + * This function is called with hbalock held for SLI3 ports or + * the ring lock held for SLI4 ports to add a command * iocb to the txq when SLI layer cannot submit the command iocb * to the ring. **/ @@ -9004,7 +9064,10 @@ void __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, struct lpfc_iocbq *piocb) { - lockdep_assert_held(&phba->hbalock); + if (phba->sli_rev == LPFC_SLI_REV4) + lockdep_assert_held(&pring->ring_lock); + else + lockdep_assert_held(&phba->hbalock); /* Insert the caller's iocb in the txq tail for later processing. 
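/*
 * __lpfc_sli_ringtx_put above now asserts whichever lock actually guards
 * the txq on each SLI revision. lockdep_assert_held() compiles to nothing
 * on non-debug kernels and flags mis-locked callers under CONFIG_LOCKDEP.
 * A self-contained sketch of the idiom with placeholder types; nothing
 * here is driver code:
 */
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct demo_ring {
	spinlock_t ring_lock;
	struct list_head txq;
};

struct demo_iocb {
	struct list_head list;
};

static void demo_ringtx_put(struct demo_ring *ring, struct demo_iocb *iocb)
{
	/* Document, and on debug builds enforce, the locking contract. */
	lockdep_assert_held(&ring->ring_lock);
	list_add_tail(&iocb->list, &ring->txq);
}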
*/ list_add_tail(&piocb->list, &pring->txq); } @@ -9333,11 +9396,9 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, memset(wqe, 0, sizeof(union lpfc_wqe128)); /* Some of the fields are in the right position already */ memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe)); - if (iocbq->iocb.ulpCommand != CMD_SEND_FRAME) { - /* The ct field has moved so reset */ - wqe->generic.wqe_com.word7 = 0; - wqe->generic.wqe_com.word10 = 0; - } + /* The ct field has moved so reset */ + wqe->generic.wqe_com.word7 = 0; + wqe->generic.wqe_com.word10 = 0; abort_tag = (uint32_t) iocbq->iotag; xritag = iocbq->sli4_xritag; @@ -9796,7 +9857,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * we re-construct this WQE here based on information in * iocbq from scratch. */ - memset(wqe, 0, sizeof(union lpfc_wqe)); + memset(wqe, 0, sizeof(*wqe)); /* OX_ID is invariable to who sent ABTS to CT exchange */ bf_set(xmit_bls_rsp64_oxid, &wqe->xmit_bls_rsp, bf_get(lpfc_abts_oxid, &iocbq->iocb.un.bls_rsp)); @@ -9843,6 +9904,15 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, break; case CMD_SEND_FRAME: + bf_set(wqe_cmnd, &wqe->generic.wqe_com, CMD_SEND_FRAME); + bf_set(wqe_sof, &wqe->generic.wqe_com, 0x2E); /* SOF byte */ + bf_set(wqe_eof, &wqe->generic.wqe_com, 0x41); /* EOF byte */ + bf_set(wqe_lenloc, &wqe->generic.wqe_com, 1); + bf_set(wqe_xbl, &wqe->generic.wqe_com, 1); + bf_set(wqe_dbde, &wqe->generic.wqe_com, 1); + bf_set(wqe_xc, &wqe->generic.wqe_com, 1); + bf_set(wqe_cmd_type, &wqe->generic.wqe_com, 0xA); + bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT); bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag); bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag); return 0; @@ -9888,7 +9958,7 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq, * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue * an iocb command to an HBA with SLI-4 interface spec. * - * This function is called with hbalock held. The function will return success + * This function is called with ringlock held. The function will return success * after it successfully submit the iocb to firmware or after adding to the * txq. 
**/ @@ -9904,7 +9974,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number, /* Get the WQ */ if ((piocb->iocb_flag & LPFC_IO_FCP) || (piocb->iocb_flag & LPFC_USE_FCPWQIDX)) { - wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq; + wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq; } else { wq = phba->sli4_hba.els_wq; } @@ -10051,7 +10121,7 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) lpfc_cmd = (struct lpfc_io_buf *)piocb->context1; piocb->hba_wqidx = lpfc_cmd->hdwq_no; } - return phba->sli4_hba.hdwq[piocb->hba_wqidx].fcp_wq->pring; + return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring; } else { if (unlikely(!phba->sli4_hba.els_wq)) return NULL; @@ -10078,10 +10148,13 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, struct lpfc_iocbq *piocb, uint32_t flag) { struct lpfc_sli_ring *pring; + struct lpfc_queue *eq; unsigned long iflags; int rc; if (phba->sli_rev == LPFC_SLI_REV4) { + eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq; + pring = lpfc_sli4_calc_ring(phba, piocb); if (unlikely(pring == NULL)) return IOCB_ERROR; @@ -10089,6 +10162,8 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, spin_lock_irqsave(&pring->ring_lock, iflags); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH); } else { /* For now, SLI2/3 will still use hbalock */ spin_lock_irqsave(&phba->hbalock, iflags); @@ -10504,7 +10579,7 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba) INIT_LIST_HEAD(&psli->mboxq_cmpl); /* Initialize list headers for txq and txcmplq as double linked lists */ for (i = 0; i < phba->cfg_hdw_queue; i++) { - pring = phba->sli4_hba.hdwq[i].fcp_wq->pring; + pring = phba->sli4_hba.hdwq[i].io_wq->pring; pring->flag = 0; pring->ringno = LPFC_FCP_RING; pring->txcmplq_cnt = 0; @@ -10523,16 +10598,6 @@ lpfc_sli4_queue_init(struct lpfc_hba *phba) spin_lock_init(&pring->ring_lock); if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { - for (i = 0; i < phba->cfg_hdw_queue; i++) { - pring = phba->sli4_hba.hdwq[i].nvme_wq->pring; - pring->flag = 0; - pring->ringno = LPFC_FCP_RING; - pring->txcmplq_cnt = 0; - INIT_LIST_HEAD(&pring->txq); - INIT_LIST_HEAD(&pring->txcmplq); - INIT_LIST_HEAD(&pring->iocb_continueq); - spin_lock_init(&pring->ring_lock); - } pring = phba->sli4_hba.nvmels_wq->pring; pring->flag = 0; pring->ringno = LPFC_ELS_RING; @@ -10713,14 +10778,14 @@ lpfc_sli_host_down(struct lpfc_vport *vport) set_bit(LPFC_DATA_READY, &phba->data_flags); } prev_pring_flag = pring->flag; - spin_lock_irq(&pring->ring_lock); + spin_lock(&pring->ring_lock); list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) { if (iocb->vport != vport) continue; list_move_tail(&iocb->list, &completions); } - spin_unlock_irq(&pring->ring_lock); + spin_unlock(&pring->ring_lock); list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) { if (iocb->vport != vport) @@ -10796,9 +10861,9 @@ lpfc_sli_hba_down(struct lpfc_hba *phba) pring = qp->pring; if (!pring) continue; - spin_lock_irq(&pring->ring_lock); + spin_lock(&pring->ring_lock); list_splice_init(&pring->txq, &completions); - spin_unlock_irq(&pring->ring_lock); + spin_unlock(&pring->ring_lock); if (pring == phba->sli4_hba.els_wq->pring) { pring->flag |= LPFC_DEFERRED_RING_EVENT; /* Set the lpfc data pending flag */ @@ -10979,7 +11044,7 @@ lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, 
KERN_ERR, LOG_INIT, "0402 Cannot find virtual addr for buffer tag on " - "ring %d Data x%lx x%p x%p x%x\n", + "ring %d Data x%lx x%px x%px x%x\n", pring->ringno, (unsigned long) tag, slp->next, slp->prev, pring->postbufq_cnt); @@ -11023,7 +11088,7 @@ lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, spin_unlock_irq(&phba->hbalock); lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0410 Cannot find virtual addr for mapped buf on " - "ring %d Data x%llx x%p x%p x%x\n", + "ring %d Data x%llx x%px x%px x%x\n", pring->ringno, (unsigned long long)phys, slp->next, slp->prev, pring->postbufq_cnt); return NULL; @@ -11078,7 +11143,7 @@ lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, abort_iocb = phba->sli.iocbq_lookup[abort_context]; lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI, - "0327 Cannot abort els iocb %p " + "0327 Cannot abort els iocb x%px " "with tag %x context %x, abort status %x, " "abort code %x\n", abort_iocb, abort_iotag, abort_context, @@ -11493,7 +11558,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, int i; /* all I/Os are in process of being flushed */ - if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) + if (phba->hba_flag & HBA_IOQ_FLUSH) return errcnt; for (i = 1; i <= phba->sli.last_iotag; i++) { @@ -11603,7 +11668,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, spin_lock_irqsave(&phba->hbalock, iflags); /* all I/Os are in process of being flushed */ - if (phba->hba_flag & HBA_FCP_IOQ_FLUSH) { + if (phba->hba_flag & HBA_IOQ_FLUSH) { spin_unlock_irqrestore(&phba->hbalock, iflags); return 0; } @@ -11627,7 +11692,7 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, if (phba->sli_rev == LPFC_SLI_REV4) { pring_s4 = - phba->sli4_hba.hdwq[iocbq->hba_wqidx].fcp_wq->pring; + phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring; if (!pring_s4) { spin_unlock(&lpfc_cmd->buf_lock); continue; @@ -11768,7 +11833,10 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, !(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) { lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, cur_iocbq); - lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY; + if (rspiocbq && (rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY)) + lpfc_cmd->flags |= LPFC_SBUF_XBUSY; + else + lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY; } pdone_q = cmdiocbq->context_un.wait_queue; @@ -13053,11 +13121,11 @@ lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size) } /** - * lpfc_sli4_sp_handle_async_event - Handle an asynchroous event + * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event * @phba: Pointer to HBA context object. * @cqe: Pointer to mailbox completion queue entry. * - * This routine process a mailbox completion queue entry with asynchrous + * This routine process a mailbox completion queue entry with asynchronous * event. * * Return: true if work posted to worker thread, otherwise false. 
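/*
 * In the lpfc_sli_host_down and lpfc_sli_hba_down hunks above, the inner
 * ring_lock acquisitions drop their _irq suffix because the callers take
 * phba->hbalock with interrupts already disabled; an inner
 * spin_unlock_irq() would re-enable interrupts while the outer lock is
 * still held. A minimal sketch of the correct nesting (the function is
 * hypothetical, the field names are the driver's):
 */
static void demo_flush_txq(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			   struct list_head *completions)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);	/* irqs now off */

	/* Plain lock/unlock: leave the interrupt state alone here. */
	spin_lock(&pring->ring_lock);
	list_splice_init(&pring->txq, completions);
	spin_unlock(&pring->ring_lock);

	spin_unlock_irqrestore(&phba->hbalock, iflags);
}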
@@ -13190,13 +13258,19 @@ send_current_mbox: phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE; /* Setting active mailbox pointer need to be in sync to flag clear */ phba->sli.mbox_active = NULL; + if (bf_get(lpfc_trailer_consumed, mcqe)) + lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); spin_unlock_irqrestore(&phba->hbalock, iflags); /* Wake up worker thread to post the next pending mailbox command */ lpfc_worker_wake_up(phba); + return workposted; + out_no_mqe_complete: + spin_lock_irqsave(&phba->hbalock, iflags); if (bf_get(lpfc_trailer_consumed, mcqe)) lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq); - return workposted; + spin_unlock_irqrestore(&phba->hbalock, iflags); + return false; } /** @@ -13205,7 +13279,7 @@ out_no_mqe_complete: * @cqe: Pointer to mailbox completion queue entry. * * This routine process a mailbox completion queue entry, it invokes the - * proper mailbox complete handling or asynchrous event handling routine + * proper mailbox complete handling or asynchronous event handling routine * according to the MCQE's async bit. * * Return: true if work posted to worker thread, otherwise false. @@ -13249,7 +13323,6 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, struct lpfc_sli_ring *pring = cq->pring; int txq_cnt = 0; int txcmplq_cnt = 0; - int fcp_txcmplq_cnt = 0; /* Check for response status */ if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) { @@ -13271,9 +13344,8 @@ lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, txcmplq_cnt++; lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d " - "fcp_txcmplq_cnt=%d, els_txcmplq_cnt=%d\n", + "els_txcmplq_cnt=%d\n", txq_cnt, phba->iocb_cnt, - fcp_txcmplq_cnt, txcmplq_cnt); return false; } @@ -13336,8 +13408,13 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, unsigned long iflags; switch (cq->subtype) { - case LPFC_FCP: - lpfc_sli4_fcp_xri_aborted(phba, wcqe, cq->hdwq); + case LPFC_IO: + lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq); + if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) { + /* Notify aborted XRI for NVME work queue */ + if (phba->nvmet_support) + lpfc_sli4_nvmet_xri_aborted(phba, wcqe); + } workposted = false; break; case LPFC_NVME_LS: /* NVME LS uses ELS resources */ @@ -13355,15 +13432,6 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba, spin_unlock_irqrestore(&phba->hbalock, iflags); workposted = true; break; - case LPFC_NVME: - /* Notify aborted XRI for NVME work queue */ - if (phba->nvmet_support) - lpfc_sli4_nvmet_xri_aborted(phba, wcqe); - else - lpfc_sli4_nvme_xri_aborted(phba, wcqe, cq->hdwq); - - workposted = false; - break; default: lpfc_printf_log(phba, KERN_ERR, LOG_SLI, "0603 Invalid CQ subtype %d: " @@ -13628,6 +13696,7 @@ __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq, phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed, LPFC_QUEUE_NOARM); consumed = 0; + cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK; } if (count == LPFC_NVMET_CQ_NOTIFY) @@ -13691,7 +13760,7 @@ __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq) &delay); break; case LPFC_WCQ: - if (cq->subtype == LPFC_FCP || cq->subtype == LPFC_NVME) + if (cq->subtype == LPFC_IO) workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe, &delay); @@ -14008,10 +14077,7 @@ lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq, cq->CQ_wq++; /* Process the WQ complete event */ phba->last_completion_time = jiffies; - if ((cq->subtype == LPFC_FCP) || (cq->subtype == LPFC_NVME)) - lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, - 
(struct lpfc_wcqe_complete *)&wcqe); - if (cq->subtype == LPFC_NVME_LS) + if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS) lpfc_sli4_fp_handle_fcp_wcqe(phba, cq, (struct lpfc_wcqe_complete *)&wcqe); break; @@ -14259,7 +14325,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) spin_lock_irqsave(&phba->hbalock, iflag); if (phba->link_state < LPFC_LINK_DOWN) /* Flush, clear interrupt, and rearm the EQ */ - lpfc_sli4_eq_flush(phba, fpeq); + lpfc_sli4_eqcq_flush(phba, fpeq); spin_unlock_irqrestore(&phba->hbalock, iflag); return IRQ_NONE; } @@ -14269,14 +14335,14 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id) fpeq->last_cpu = raw_smp_processor_id(); if (icnt > LPFC_EQD_ISR_TRIGGER && - phba->cfg_irq_chann == 1 && + fpeq->q_flag & HBA_EQ_DELAY_CHK && phba->cfg_auto_imax && fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY && phba->sli.sli_flag & LPFC_SLI_USE_EQDR) lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY); /* process and rearm the EQ */ - ecount = lpfc_sli4_process_eq(phba, fpeq); + ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM); if (unlikely(ecount == 0)) { fpeq->EQ_no_entry++; @@ -14336,6 +14402,147 @@ lpfc_sli4_intr_handler(int irq, void *dev_id) return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE; } /* lpfc_sli4_intr_handler */ +void lpfc_sli4_poll_hbtimer(struct timer_list *t) +{ + struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer); + struct lpfc_queue *eq; + int i = 0; + + rcu_read_lock(); + + list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list) + i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH); + if (!list_empty(&phba->poll_list)) + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + + rcu_read_unlock(); +} + +inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path) +{ + struct lpfc_hba *phba = eq->phba; + int i = 0; + + /* + * Unlocking an irq is one of the entry points to check + * for re-schedule, but we are good for io submission + * path as midlayer does a get_cpu to glue us in. Flush + * out the invalidate queue so we can see the updated + * value for flag. + */ + smp_rmb(); + + if (READ_ONCE(eq->mode) == LPFC_EQ_POLL) + /* We will not likely get the completion for the caller + * during this iteration but I guess that's fine. + * Future io's coming on this eq should be able to + * pick it up. As for the case of single io's, they + * will be handled through a sched from polling timer + * function which is currently triggered every 1msec. + */ + i = lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM); + + return i; +} + +static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + if (list_empty(&phba->poll_list)) { + timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0); + /* kickstart slowpath processing for this eq */ + mod_timer(&phba->cpuhp_poll_timer, + jiffies + msecs_to_jiffies(LPFC_POLL_HB)); + } + + list_add_rcu(&eq->_poll_list, &phba->poll_list); + synchronize_rcu(); +} + +static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + /* Disable slowpath processing for this eq.
Kick start the eq + * by RE-ARMING the eq's ASAP + */ + list_del_rcu(&eq->_poll_list); + synchronize_rcu(); + + if (list_empty(&phba->poll_list)) + del_timer_sync(&phba->cpuhp_poll_timer); +} + +void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba) +{ + struct lpfc_queue *eq, *next; + + list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list) + list_del(&eq->_poll_list); + + INIT_LIST_HEAD(&phba->poll_list); + synchronize_rcu(); +} + +static inline void +__lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode) +{ + if (mode == eq->mode) + return; + /* + * Currently this function is only called during a hotplug + * event and the cpu on which this function is executing + * is going offline. By now the hotplug has instructed + * the scheduler to remove this cpu from cpu active mask. + * So we don't need to worry about being put aside by the + * scheduler for a high priority process. Yes, the + * interrupts could come but they are known to retire ASAP. + */ + + /* Disable polling in the fastpath */ + WRITE_ONCE(eq->mode, mode); + /* flush out the store buffer */ + smp_wmb(); + + /* + * Add this eq to the polling list and start polling. For + * a grace period both interrupt handler and poller will + * try to process the eq _but_ that's fine. We have a + * synchronization mechanism in place (queue_claimed) to + * deal with it. This is just a draining phase for the + * interrupt handler (not eq's) as we have guaranteed through + * barrier that all the CPUs have seen the new CQ_POLLED + * state, which will effectively disable the REARMING of + * the EQ. The whole idea is eq's die off eventually as + * we are not rearming EQ's anymore. + */ + mode ? lpfc_sli4_add_to_poll_list(eq) : + lpfc_sli4_remove_from_poll_list(eq); +} + +void lpfc_sli4_start_polling(struct lpfc_queue *eq) +{ + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL); +} + +void lpfc_sli4_stop_polling(struct lpfc_queue *eq) +{ + struct lpfc_hba *phba = eq->phba; + + __lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT); + + /* Kick start for the pending io's in h/w. + * Once we switch back to interrupt processing on an eq + * the io path completion will only arm eq's when it + * receives a completion. But since the eq is in a + * disarmed state it doesn't receive a completion. This + * creates a deadlock scenario. + */ + phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM); +} + /** * lpfc_sli4_queue_free - free a queue structure and associated memory * @queue: The queue structure to free.
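/*
 * The polling machinery added above is three familiar primitives glued
 * together: an RCU-protected list of polled EQs, a heartbeat timer that
 * sweeps that list every LPFC_POLL_HB milliseconds, and synchronize_rcu()
 * after each mode flip so the hard-IRQ path is guaranteed to have observed
 * it before the list changes. A condensed restatement using the driver's
 * own names; the combined helper below is hypothetical, since the patch
 * splits this across __lpfc_sli4_switch_eqmode and
 * lpfc_sli4_add_to_poll_list:
 */
static void demo_eq_start_polling(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	/* Flip the mode first so the interrupt handler stops re-arming. */
	WRITE_ONCE(eq->mode, LPFC_EQ_POLL);
	smp_wmb();

	if (list_empty(&phba->poll_list))
		mod_timer(&phba->cpuhp_poll_timer,
			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
	list_add_rcu(&eq->_poll_list, &phba->poll_list);

	/* Drain any handler that raced with the mode change. */
	synchronize_rcu();
}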
@@ -14410,6 +14617,7 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size, return NULL; INIT_LIST_HEAD(&queue->list); + INIT_LIST_HEAD(&queue->_poll_list); INIT_LIST_HEAD(&queue->wq_list); INIT_LIST_HEAD(&queue->wqfull_list); INIT_LIST_HEAD(&queue->page_list); @@ -16918,6 +17126,8 @@ lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr) struct fc_vft_header *fc_vft_hdr; uint32_t *header = (uint32_t *) fc_hdr; +#define FC_RCTL_MDS_DIAGS 0xF4 + switch (fc_hdr->fh_r_ctl) { case FC_RCTL_DD_UNCAT: /* uncategorized information */ case FC_RCTL_DD_SOL_DATA: /* solicited data */ @@ -17445,7 +17655,6 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]; ctiocb->context1 = lpfc_nlp_get(ndlp); - ctiocb->iocb_cmpl = NULL; ctiocb->vport = phba->pport; ctiocb->iocb_cmpl = lpfc_sli4_seq_abort_rsp_cmpl; ctiocb->sli4_lxritag = NO_XRI; @@ -17928,6 +18137,17 @@ lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba, fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->cq_event.cqe.rcqe_cmpl); + if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) { + vport = phba->pport; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "2023 MDS Loopback %d bytes\n", + bf_get(lpfc_rcqe_length, + &dmabuf->cq_event.cqe.rcqe_cmpl)); + /* Handle MDS Loopback frames */ + lpfc_sli4_handle_mds_loopback(vport, dmabuf); + return; + } + /* d_id this frame is directed to */ did = sli4_did_from_fc_hdr(fc_hdr); @@ -18151,8 +18371,9 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) phba->sli4_hba.max_cfg_param.rpi_used++; phba->sli4_hba.rpi_count++; } - lpfc_printf_log(phba, KERN_INFO, LOG_SLI, - "0001 rpi:%x max:%x lim:%x\n", + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0001 Allocated rpi:x%x max:x%x lim:x%x\n", (int) rpi, max_rpi, rpi_limit); /* @@ -18208,9 +18429,21 @@ lpfc_sli4_alloc_rpi(struct lpfc_hba *phba) static void __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi) { + /* + * if the rpi value indicates a prior unreg has already + * been done, skip the unreg. 
+ */ + if (rpi == LPFC_RPI_ALLOC_ERROR) + return; + if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) { phba->sli4_hba.rpi_count--; phba->sli4_hba.max_cfg_param.rpi_used--; + } else { + lpfc_printf_log(phba, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "2016 rpi %x not inuse\n", + rpi); } } @@ -19225,7 +19458,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, struct lpfc_mbx_wr_object *wr_object; LPFC_MBOXQ_t *mbox; int rc = 0, i = 0; - uint32_t shdr_status, shdr_add_status, shdr_change_status; + uint32_t shdr_status, shdr_add_status, shdr_change_status, shdr_csf; uint32_t mbox_tmo; struct lpfc_dmabuf *dmabuf; uint32_t written = 0; @@ -19282,6 +19515,16 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, if (check_change_status) { shdr_change_status = bf_get(lpfc_wr_object_change_status, &wr_object->u.response); + + if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET || + shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) { + shdr_csf = bf_get(lpfc_wr_object_csf, + &wr_object->u.response); + if (shdr_csf) + shdr_change_status = + LPFC_CHANGE_STATUS_PCI_RESET; + } + switch (shdr_change_status) { case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET): lpfc_printf_log(phba, KERN_INFO, LOG_INIT, @@ -19461,7 +19704,7 @@ lpfc_drain_txq(struct lpfc_hba *phba) if (phba->link_flag & LS_MDS_LOOPBACK) { /* MDS WQE are posted only to first WQ*/ - wq = phba->sli4_hba.hdwq[0].fcp_wq; + wq = phba->sli4_hba.hdwq[0].io_wq; if (unlikely(!wq)) return 0; pring = wq->pring; @@ -19706,16 +19949,18 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); return 0; } /* NVME_FCREQ and NVME_ABTS requests */ if (pwqe->iocb_flag & LPFC_IO_NVME) { /* Get the IO distribution (hba_wqidx) for WQ assignment. */ - wq = qp->nvme_wq; + wq = qp->io_wq; pring = wq->pring; - bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, qp, wq_access); @@ -19726,13 +19971,15 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); return 0; } /* NVMET requests */ if (pwqe->iocb_flag & LPFC_IO_NVMET) { /* Get the IO distribution (hba_wqidx) for WQ assignment. 
*/ - wq = qp->nvme_wq; + wq = qp->io_wq; pring = wq->pring; ctxp = pwqe->context2; @@ -19743,7 +19990,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, } bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com, pwqe->sli4_xritag); - bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->nvme_cq_map); + bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map); lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags, qp, wq_access); @@ -19754,6 +20001,8 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, } lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); spin_unlock_irqrestore(&pring->ring_lock, iflags); + + lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); return 0; } return WQE_ERROR; @@ -19790,9 +20039,7 @@ void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid) if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) { pvt_pool = &qp->p_multixri_pool->pvt_pool; pbl_pool = &qp->p_multixri_pool->pbl_pool; - txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; - if (qp->nvme_wq) - txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; multixri_pool->stat_pbl_count = pbl_pool->count; multixri_pool->stat_pvt_count = pvt_pool->count; @@ -19862,12 +20109,9 @@ void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid) watermark_max = xri_limit; watermark_min = xri_limit / 2; - txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; abts_io_bufs = qp->abts_scsi_io_bufs; - if (qp->nvme_wq) { - txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; - abts_io_bufs += qp->abts_nvme_io_bufs; - } + abts_io_bufs += qp->abts_nvme_io_bufs; new_watermark = txcmplq_cnt + abts_io_bufs; new_watermark = min(watermark_max, new_watermark); @@ -20121,6 +20365,13 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, lpfc_ncmd->cur_iocbq.wqe_cmpl = NULL; lpfc_ncmd->cur_iocbq.iocb_cmpl = NULL; + if (phba->cfg_xpsgl && !phba->nvmet_support && + !list_empty(&lpfc_ncmd->dma_sgl_xtra_list)) + lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd); + + if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list)) + lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd); + if (phba->cfg_xri_rebalancing) { if (lpfc_ncmd->expedite) { /* Return to expedite pool */ @@ -20142,12 +20393,9 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd, pbl_pool = &qp->p_multixri_pool->pbl_pool; pvt_pool = &qp->p_multixri_pool->pvt_pool; - txcmplq_cnt = qp->fcp_wq->pring->txcmplq_cnt; + txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt; abts_io_bufs = qp->abts_scsi_io_bufs; - if (qp->nvme_wq) { - txcmplq_cnt += qp->nvme_wq->pring->txcmplq_cnt; - abts_io_bufs += qp->abts_nvme_io_bufs; - } + abts_io_bufs += qp->abts_nvme_io_bufs; xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs; xri_limit = qp->p_multixri_pool->xri_limit; @@ -20402,3 +20650,294 @@ struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba, return lpfc_cmd; } + +/** + * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool + * @phba: The HBA for which this call is being executed. + * @lpfc_buf: IO buf structure to append the SGL chunk + * + * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool, + * and will allocate an SGL chunk if the pool is empty. 
+ * + * Return codes: + * NULL - Error + * Pointer to sli4_hybrid_sgl - Success + **/ +struct sli4_hybrid_sgl * +lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) +{ + struct sli4_hybrid_sgl *list_entry = NULL; + struct sli4_hybrid_sgl *tmp = NULL; + struct sli4_hybrid_sgl *allocated_sgl = NULL; + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; + struct list_head *buf_list = &hdwq->sgl_list; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + if (likely(!list_empty(buf_list))) { + /* break off 1 chunk from the sgl_list */ + list_for_each_entry_safe(list_entry, tmp, + buf_list, list_node) { + list_move_tail(&list_entry->list_node, + &lpfc_buf->dma_sgl_xtra_list); + break; + } + } else { + /* allocate more */ + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, + cpu_to_node(hdwq->io_wq->chann)); + if (!tmp) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "8353 error kmalloc memory for HDWQ " + "%d %s\n", + lpfc_buf->hdwq_no, __func__); + return NULL; + } + + tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool, + GFP_ATOMIC, &tmp->dma_phys_sgl); + if (!tmp->dma_sgl) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "8354 error pool_alloc memory for HDWQ " + "%d %s\n", + lpfc_buf->hdwq_no, __func__); + kfree(tmp); + return NULL; + } + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list); + } + + allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list, + struct sli4_hybrid_sgl, + list_node); + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + + return allocated_sgl; +} + +/** + * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool + * @phba: The HBA for which this call is being executed. + * @lpfc_buf: IO buf structure with the SGL chunk + * + * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool. + * + * Return codes: + * 0 - Success + * -EINVAL - Error + **/ +int +lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf) +{ + int rc = 0; + struct sli4_hybrid_sgl *list_entry = NULL; + struct sli4_hybrid_sgl *tmp = NULL; + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; + struct list_head *buf_list = &hdwq->sgl_list; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) { + list_for_each_entry_safe(list_entry, tmp, + &lpfc_buf->dma_sgl_xtra_list, + list_node) { + list_move_tail(&list_entry->list_node, + buf_list); + } + } else { + rc = -EINVAL; + } + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + return rc; +} + +/** + * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool + * @phba: phba object + * @hdwq: hdwq to cleanup sgl buff resources on + * + * This routine frees all SGL chunks of hdwq SGL chunk pool. 
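/*
 * lpfc_get_sgl_per_hdwq() above uses the drop-the-lock-to-allocate shape:
 * pop from the per-hdwq free list under hdwq_lock when possible, otherwise
 * release the lock, do a GFP_ATOMIC node-local allocation, and retake the
 * lock to publish the new chunk. A generic, self-contained sketch of that
 * shape with placeholder types, not driver structures:
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_chunk {
	struct list_head node;
};

static struct demo_chunk *demo_pool_get(spinlock_t *lock,
					struct list_head *pool,
					struct list_head *dst)
{
	struct demo_chunk *c;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(pool)) {
		c = list_first_entry(pool, struct demo_chunk, node);
		list_move_tail(&c->node, dst);
	} else {
		/* Drop the lock across the allocation to keep hold times
		 * short; GFP_ATOMIC keeps it legal from atomic context.
		 */
		spin_unlock_irqrestore(lock, flags);
		c = kmalloc(sizeof(*c), GFP_ATOMIC);
		if (!c)
			return NULL;
		spin_lock_irqsave(lock, flags);
		list_add_tail(&c->node, dst);
	}
	spin_unlock_irqrestore(lock, flags);
	return c;
}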
+ * + * Return codes: + * None + **/ +void +lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *hdwq) +{ + struct list_head *buf_list = &hdwq->sgl_list; + struct sli4_hybrid_sgl *list_entry = NULL; + struct sli4_hybrid_sgl *tmp = NULL; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + /* Free sgl pool */ + list_for_each_entry_safe(list_entry, tmp, + buf_list, list_node) { + dma_pool_free(phba->lpfc_sg_dma_buf_pool, + list_entry->dma_sgl, + list_entry->dma_phys_sgl); + list_del(&list_entry->list_node); + kfree(list_entry); + } + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); +} + +/** + * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq + * @phba: The HBA for which this call is being executed. + * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer + * + * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool, + * and will allocate an CMD/RSP buffer if the pool is empty. + * + * Return codes: + * NULL - Error + * Pointer to fcp_cmd_rsp_buf - Success + **/ +struct fcp_cmd_rsp_buf * +lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_buf) +{ + struct fcp_cmd_rsp_buf *list_entry = NULL; + struct fcp_cmd_rsp_buf *tmp = NULL; + struct fcp_cmd_rsp_buf *allocated_buf = NULL; + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + if (likely(!list_empty(buf_list))) { + /* break off 1 chunk from the list */ + list_for_each_entry_safe(list_entry, tmp, + buf_list, + list_node) { + list_move_tail(&list_entry->list_node, + &lpfc_buf->dma_cmd_rsp_list); + break; + } + } else { + /* allocate more */ + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC, + cpu_to_node(hdwq->io_wq->chann)); + if (!tmp) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "8355 error kmalloc memory for HDWQ " + "%d %s\n", + lpfc_buf->hdwq_no, __func__); + return NULL; + } + + tmp->fcp_cmnd = dma_pool_alloc(phba->lpfc_cmd_rsp_buf_pool, + GFP_ATOMIC, + &tmp->fcp_cmd_rsp_dma_handle); + + if (!tmp->fcp_cmnd) { + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "8356 error pool_alloc memory for HDWQ " + "%d %s\n", + lpfc_buf->hdwq_no, __func__); + kfree(tmp); + return NULL; + } + + tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd + + sizeof(struct fcp_cmnd)); + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list); + } + + allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list, + struct fcp_cmd_rsp_buf, + list_node); + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + + return allocated_buf; +} + +/** + * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool + * @phba: The HBA for which this call is being executed. + * @lpfc_buf: IO buf structure with the CMD/RSP buf + * + * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool. 
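/*
 * Each element handed out by lpfc_get_cmd_rsp_buf_per_hdwq() above packs
 * the FCP command and response into one dma_pool allocation: fcp_rsp is
 * simply fcp_cmnd plus sizeof(struct fcp_cmnd). The response's bus address
 * therefore falls out of the same offset from the element's DMA handle, so
 * no second mapping is needed. A sketch of that derivation; the helper is
 * illustrative and not part of this patch:
 */
static dma_addr_t demo_rsp_dma(struct fcp_cmd_rsp_buf *buf)
{
	/* cmd and rsp share a single mapping and a single pool element. */
	return buf->fcp_cmd_rsp_dma_handle + sizeof(struct fcp_cmnd);
}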
+ * + * Return codes: + * 0 - Success + * -EINVAL - Error + **/ +int +lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *lpfc_buf) +{ + int rc = 0; + struct fcp_cmd_rsp_buf *list_entry = NULL; + struct fcp_cmd_rsp_buf *tmp = NULL; + struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq; + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) { + list_for_each_entry_safe(list_entry, tmp, + &lpfc_buf->dma_cmd_rsp_list, + list_node) { + list_move_tail(&list_entry->list_node, + buf_list); + } + } else { + rc = -EINVAL; + } + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); + return rc; +} + +/** + * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool + * @phba: phba object + * @hdwq: hdwq to cleanup cmd rsp buff resources on + * + * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool. + * + * Return codes: + * None + **/ +void +lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *hdwq) +{ + struct list_head *buf_list = &hdwq->cmd_rsp_buf_list; + struct fcp_cmd_rsp_buf *list_entry = NULL; + struct fcp_cmd_rsp_buf *tmp = NULL; + unsigned long iflags; + + spin_lock_irqsave(&hdwq->hdwq_lock, iflags); + + /* Free cmd_rsp buf pool */ + list_for_each_entry_safe(list_entry, tmp, + buf_list, + list_node) { + dma_pool_free(phba->lpfc_cmd_rsp_buf_pool, + list_entry->fcp_cmnd, + list_entry->fcp_cmd_rsp_dma_handle); + list_del(&list_entry->list_node); + kfree(list_entry); + } + + spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags); +} diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 467b8270f7fd..7bcf922a8be2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -365,9 +365,18 @@ struct lpfc_io_buf { /* Common fields */ struct list_head list; void *data; + dma_addr_t dma_handle; dma_addr_t dma_phys_sgl; - struct sli4_sge *dma_sgl; + + struct sli4_sge *dma_sgl; /* initial segment chunk */ + + /* linked list of extra sli4_hybrid_sge */ + struct list_head dma_sgl_xtra_list; + + /* list head for fcp_cmd_rsp buf */ + struct list_head dma_cmd_rsp_list; + struct lpfc_iocbq cur_iocbq; struct lpfc_sli4_hdw_queue *hdwq; uint16_t hdwq_no; @@ -375,14 +384,13 @@ struct lpfc_io_buf { struct lpfc_nodelist *ndlp; uint32_t timeout; - uint16_t flags; /* TBD convert exch_busy to flags */ + uint16_t flags; #define LPFC_SBUF_XBUSY 0x1 /* SLI4 hba reported XB on WCQE cmpl */ #define LPFC_SBUF_BUMP_QDEPTH 0x2 /* bumped queue depth counter */ /* External DIF device IO conversions */ #define LPFC_SBUF_NORMAL_DIF 0x4 /* normal mode to insert/strip */ #define LPFC_SBUF_PASS_DIF 0x8 /* insert/strip mode to passthru */ #define LPFC_SBUF_NOT_POSTED 0x10 /* SGL failed post to FW. */ - uint16_t exch_busy; /* SLI4 hba reported XB on complete WCQE */ uint16_t status; /* From IOCB Word 7- ulpStatus */ uint32_t result; /* From IOCB Word 4. 
*/ diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 3aeca387b22a..d963ca871383 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -41,11 +41,18 @@ /* Multi-queue arrangement for FCP EQ/CQ/WQ tuples */ #define LPFC_HBA_HDWQ_MIN 0 -#define LPFC_HBA_HDWQ_MAX 128 -#define LPFC_HBA_HDWQ_DEF 0 +#define LPFC_HBA_HDWQ_MAX 256 +#define LPFC_HBA_HDWQ_DEF LPFC_HBA_HDWQ_MIN -/* Common buffer size to accomidate SCSI and NVME IO buffers */ -#define LPFC_COMMON_IO_BUF_SZ 768 +/* irq_chann range, values */ +#define LPFC_IRQ_CHANN_MIN 0 +#define LPFC_IRQ_CHANN_MAX 256 +#define LPFC_IRQ_CHANN_DEF LPFC_IRQ_CHANN_MIN + +/* FCP MQ queue count limiting */ +#define LPFC_FCP_MQ_THRESHOLD_MIN 0 +#define LPFC_FCP_MQ_THRESHOLD_MAX 256 +#define LPFC_FCP_MQ_THRESHOLD_DEF 8 /* * Provide the default FCF Record attributes used by the driver @@ -109,9 +116,8 @@ enum lpfc_sli4_queue_type { enum lpfc_sli4_queue_subtype { LPFC_NONE, LPFC_MBOX, - LPFC_FCP, + LPFC_IO, LPFC_ELS, - LPFC_NVME, LPFC_NVMET, LPFC_NVME_LS, LPFC_USOL @@ -132,6 +138,23 @@ struct lpfc_rqb { struct lpfc_queue { struct list_head list; struct list_head wq_list; + + /* + * If interrupts are in effect on _all_ the eq's the footprint + * of polling code is zero (except mode). This memory is + * checked for every io to see if the io needs to be polled and + * on completion to check if the eq's need to be rearmed. + * Keep in same cacheline as the queue ptr to avoid cpu fetch + * stalls. Using 1B memory will leave us with 7B hole. Fill + * it with other frequently used members. + */ + uint16_t last_cpu; /* most recent cpu */ + uint16_t hdwq; + uint8_t qe_valid; + uint8_t mode; /* interrupt or polling */ +#define LPFC_EQ_INTERRUPT 0 +#define LPFC_EQ_POLL 1 + struct list_head wqfull_list; enum lpfc_sli4_queue_type type; enum lpfc_sli4_queue_subtype subtype; @@ -198,6 +221,7 @@ struct lpfc_queue { uint8_t q_flag; #define HBA_NVMET_WQFULL 0x1 /* We hit WQ Full condition for NVMET */ #define HBA_NVMET_CQ_NOTIFY 0x1 /* LPFC_NVMET_CQ_NOTIFY CQEs this EQE */ +#define HBA_EQ_DELAY_CHK 0x2 /* EQ is a candidate for coalescing */ #define LPFC_NVMET_CQ_NOTIFY 4 void __iomem *db_regaddr; uint16_t dpp_enable; @@ -238,10 +262,8 @@ struct lpfc_queue { struct delayed_work sched_spwork; uint64_t isr_timestamp; - uint16_t hdwq; - uint16_t last_cpu; /* most recent cpu */ - uint8_t qe_valid; struct lpfc_queue *assoc_qp; + struct list_head _poll_list; void **q_pgs; /* array to index entries per page */ }; @@ -450,11 +472,17 @@ struct lpfc_hba; #define LPFC_SLI4_HANDLER_NAME_SZ 16 struct lpfc_hba_eq_hdl { uint32_t idx; + uint16_t irq; char handler_name[LPFC_SLI4_HANDLER_NAME_SZ]; struct lpfc_hba *phba; struct lpfc_queue *eq; + struct cpumask aff_mask; }; +#define lpfc_get_eq_hdl(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx]) +#define lpfc_get_aff_mask(eqidx) (&phba->sli4_hba.hba_eq_hdl[eqidx].aff_mask) +#define lpfc_get_irq(eqidx) (phba->sli4_hba.hba_eq_hdl[eqidx].irq) + /*BB Credit recovery value*/ struct lpfc_bbscn_params { uint32_t word0; @@ -512,6 +540,7 @@ struct lpfc_pc_sli4_params { uint8_t cqav; uint8_t wqsize; uint8_t bv1s; + uint8_t pls; #define LPFC_WQ_SZ64_SUPPORT 1 #define LPFC_WQ_SZ128_SUPPORT 2 uint8_t wqpcnt; @@ -543,11 +572,10 @@ struct lpfc_sli4_lnk_info { #define LPFC_SLI4_HANDLER_CNT (LPFC_HBA_IO_CHAN_MAX+ \ LPFC_FOF_IO_CHAN_NUM) -/* Used for IRQ vector to CPU mapping */ +/* Used for tracking CPU mapping attributes */ struct lpfc_vector_map_info { uint16_t phys_id; uint16_t core_id; - uint16_t irq;
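/*
 * The struct lpfc_queue reshuffle above deliberately parks the per-IO hot
 * fields (last_cpu, hdwq, qe_valid, mode) next to the leading list heads so
 * the fastpath poll check costs a single cacheline fetch. A compile-time
 * guard one could add to keep that property honest; this assertion is
 * illustrative, not part of the patch, and assumes the hot fields fit in
 * the first cacheline on the target architecture:
 */
#include <linux/build_bug.h>
#include <linux/cache.h>
#include <linux/stddef.h>

static inline void demo_lpfc_queue_layout_check(void)
{
	BUILD_BUG_ON(offsetofend(struct lpfc_queue, mode) > SMP_CACHE_BYTES);
}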
uint16_t eq; uint16_t hdwq; uint16_t flag; @@ -641,22 +669,17 @@ struct lpfc_eq_intr_info { struct lpfc_sli4_hdw_queue { /* Pointers to the constructed SLI4 queues */ struct lpfc_queue *hba_eq; /* Event queues for HBA */ - struct lpfc_queue *fcp_cq; /* Fast-path FCP compl queue */ - struct lpfc_queue *nvme_cq; /* Fast-path NVME compl queue */ - struct lpfc_queue *fcp_wq; /* Fast-path FCP work queue */ - struct lpfc_queue *nvme_wq; /* Fast-path NVME work queue */ - uint16_t fcp_cq_map; - uint16_t nvme_cq_map; + struct lpfc_queue *io_cq; /* Fast-path FCP & NVME compl queue */ + struct lpfc_queue *io_wq; /* Fast-path FCP & NVME work queue */ + uint16_t io_cq_map; /* Keep track of IO buffers for this hardware queue */ spinlock_t io_buf_list_get_lock; /* Common buf alloc list lock */ struct list_head lpfc_io_buf_list_get; spinlock_t io_buf_list_put_lock; /* Common buf free list lock */ struct list_head lpfc_io_buf_list_put; - spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ - struct list_head lpfc_abts_scsi_buf_list; - spinlock_t abts_nvme_buf_list_lock; /* list of aborted NVME IOs */ - struct list_head lpfc_abts_nvme_buf_list; + spinlock_t abts_io_buf_list_lock; /* list of aborted IOs */ + struct list_head lpfc_abts_io_buf_list; uint32_t total_io_bufs; uint32_t get_io_bufs; uint32_t put_io_bufs; @@ -680,6 +703,13 @@ struct lpfc_sli4_hdw_queue { uint32_t cpucheck_xmt_io[LPFC_CHECK_CPU_CNT]; uint32_t cpucheck_cmpl_io[LPFC_CHECK_CPU_CNT]; #endif + + /* Per HDWQ pool resources */ + struct list_head sgl_list; + struct list_head cmd_rsp_buf_list; + + /* Lock for syncing Per HDWQ pool resources */ + spinlock_t hdwq_lock; }; #ifdef LPFC_HDWQ_LOCK_STAT @@ -845,8 +875,8 @@ struct lpfc_sli4_hba { struct lpfc_queue **cq_lookup; struct list_head lpfc_els_sgl_list; struct list_head lpfc_abts_els_sgl_list; - spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ - struct list_head lpfc_abts_scsi_buf_list; + spinlock_t abts_io_buf_list_lock; /* list of aborted SCSI IOs */ + struct list_head lpfc_abts_io_buf_list; struct list_head lpfc_nvmet_sgl_list; spinlock_t abts_nvmet_buf_list_lock; /* list of aborted NVMET IOs */ struct list_head lpfc_abts_nvmet_ctx_list; @@ -888,6 +918,7 @@ struct lpfc_sli4_hba { struct lpfc_vector_map_info *cpu_map; uint16_t num_possible_cpu; uint16_t num_present_cpu; + struct cpumask numa_mask; uint16_t curr_disp_cpu; struct lpfc_eq_intr_info __percpu *eq_info; uint32_t conf_trunk; @@ -1051,10 +1082,11 @@ int lpfc_sli4_resume_rpi(struct lpfc_nodelist *, void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *); void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *); void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *); -void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *, - struct sli4_wcqe_xri_aborted *, int); void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba, - struct sli4_wcqe_xri_aborted *axri, int idx); + struct sli4_wcqe_xri_aborted *axri, + struct lpfc_io_buf *lpfc_ncmd); +void lpfc_sli4_io_xri_aborted(struct lpfc_hba *phba, + struct sli4_wcqe_xri_aborted *axri, int idx); void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba, struct sli4_wcqe_xri_aborted *axri); void lpfc_sli4_els_xri_aborted(struct lpfc_hba *, @@ -1089,6 +1121,17 @@ int lpfc_sli4_post_status_check(struct lpfc_hba *); uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *); uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_sli4_ras_dma_free(struct lpfc_hba *phba); +struct sli4_hybrid_sgl *lpfc_get_sgl_per_hdwq(struct 
lpfc_hba *phba, + struct lpfc_io_buf *buf); +struct fcp_cmd_rsp_buf *lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *buf); +int lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *buf); +int lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_io_buf *buf); +void lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *hdwq); +void lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba, + struct lpfc_sli4_hdw_queue *hdwq); static inline void *lpfc_sli4_qe(struct lpfc_queue *q, uint16_t idx) { return q->q_pgs[idx / q->entry_cnt_per_pg] + diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index f7e93aaf1e00..9563c49f36ab 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. * *******************************************************************/ -#define LPFC_DRIVER_VERSION "12.2.0.3" +#define LPFC_DRIVER_VERSION "12.6.0.3" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 343bc71d4615..b76646357980 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -527,9 +527,11 @@ disable_vport(struct fc_vport *fc_vport) * scsi_host_put() to release the vport. */ lpfc_mbx_unreg_vpi(vport); - spin_lock_irq(shost->host_lock); - vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; - spin_unlock_irq(shost->host_lock); + if (phba->sli_rev == LPFC_SLI_REV4) { + spin_lock_irq(shost->host_lock); + vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI; + spin_unlock_irq(shost->host_lock); + } lpfc_vport_set_state(vport, FC_VPORT_DISABLED); lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT, diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c index 9c5566217ef6..b5dde9d0d054 100644 --- a/drivers/scsi/mac_scsi.c +++ b/drivers/scsi/mac_scsi.c @@ -464,7 +464,7 @@ static int __init mac_scsi_probe(struct platform_device *pdev) mac_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) mac_scsi_template.cmd_per_lun = setup_cmd_per_lun; - if (setup_sg_tablesize >= 0) + if (setup_sg_tablesize > 0) mac_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) mac_scsi_template.this_id = setup_hostid & 7; diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 45a66048801b..ff6d4aa92421 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c @@ -4183,11 +4183,11 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) */ if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ && pdev->subsystem_device == 0xC000) - return -ENODEV; + goto out_disable_device; /* Now check the magic signature byte */ pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic); if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE) - return -ENODEV; + goto out_disable_device; /* Ok it is probably a megaraid */ } diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index f6ac819e6e96..8443f2f35be2 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -731,7 +731,7 @@ megaraid_init_mbox(adapter_t *adapter) goto out_free_raid_dev; } - raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128); + raid_dev->baseaddr = ioremap(raid_dev->baseport, 128); if (!raid_dev->baseaddr) { diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 59cca898f088..e83163c66884 100644 --- 
a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -41,10 +41,6 @@ static int mraid_mm_setup_dma_pools(mraid_mmadp_t *); static void mraid_mm_free_adp_resources(mraid_mmadp_t *); static void mraid_mm_teardown_dma_pools(mraid_mmadp_t *); -#ifdef CONFIG_COMPAT -static long mraid_mm_compat_ioctl(struct file *, unsigned int, unsigned long); -#endif - MODULE_AUTHOR("LSI Logic Corporation"); MODULE_DESCRIPTION("LSI Logic Management Module"); MODULE_LICENSE("GPL"); @@ -68,9 +64,7 @@ static wait_queue_head_t wait_q; static const struct file_operations lsi_fops = { .open = mraid_mm_open, .unlocked_ioctl = mraid_mm_unlocked_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = mraid_mm_compat_ioctl, -#endif + .compat_ioctl = compat_ptr_ioctl, .owner = THIS_MODULE, .llseek = noop_llseek, }; @@ -224,7 +218,6 @@ mraid_mm_unlocked_ioctl(struct file *filep, unsigned int cmd, { int err; - /* inconsistent: mraid_mm_compat_ioctl doesn't take the BKL */ mutex_lock(&mraid_mm_mutex); err = mraid_mm_ioctl(filep, cmd, arg); mutex_unlock(&mraid_mm_mutex); @@ -1228,25 +1221,6 @@ mraid_mm_init(void) } -#ifdef CONFIG_COMPAT -/** - * mraid_mm_compat_ioctl - 32bit to 64bit ioctl conversion routine - * @filep : file operations pointer (ignored) - * @cmd : ioctl command - * @arg : user ioctl packet - */ -static long -mraid_mm_compat_ioctl(struct file *filep, unsigned int cmd, - unsigned long arg) -{ - int err; - - err = mraid_mm_ioctl(filep, cmd, arg); - - return err; -} -#endif - /** * mraid_mm_exit - Module exit point */ diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index a14e8344822b..83d8c4cb1ad5 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -21,8 +21,10 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "07.710.50.00-rc1" -#define MEGASAS_RELDATE "June 28, 2019" +#define MEGASAS_VERSION "07.713.01.00-rc1" +#define MEGASAS_RELDATE "Dec 27, 2019" + +#define MEGASAS_MSIX_NAME_LEN 32 /* * Device IDs @@ -2203,6 +2205,7 @@ struct megasas_aen_event { }; struct megasas_irq_context { + char name[MEGASAS_MSIX_NAME_LEN]; struct megasas_instance *instance; u32 MSIxIndex; u32 os_irq; @@ -2230,9 +2233,9 @@ enum MR_PD_TYPE { /* JBOD Queue depth definitions */ #define MEGASAS_SATA_QD 32 -#define MEGASAS_SAS_QD 64 +#define MEGASAS_SAS_QD 256 #define MEGASAS_DEFAULT_PD_QD 64 -#define MEGASAS_NVME_QD 32 +#define MEGASAS_NVME_QD 64 #define MR_DEFAULT_NVME_PAGE_SIZE 4096 #define MR_DEFAULT_NVME_PAGE_SHIFT 12 @@ -2429,6 +2432,7 @@ struct megasas_instance { u8 adapter_type; bool consistent_mask_64bit; bool support_nvme_passthru; + bool enable_sdev_max_qd; u8 task_abort_tmo; u8 max_reset_tmo; u8 snapdump_wait_time; @@ -2636,10 +2640,11 @@ enum MEGASAS_OCR_CAUSE { }; enum DCMD_RETURN_STATUS { - DCMD_SUCCESS = 0, - DCMD_TIMEOUT = 1, - DCMD_FAILED = 2, - DCMD_NOT_FIRED = 3, + DCMD_SUCCESS = 0x00, + DCMD_TIMEOUT = 0x01, + DCMD_FAILED = 0x02, + DCMD_BUSY = 0x03, + DCMD_INIT = 0xff, }; u8 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index f9f07935556e..fd4b5ac6ac5b 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -109,6 +109,10 @@ int event_log_level = MFI_EVT_CLASS_CRITICAL; module_param(event_log_level, int, 0644); MODULE_PARM_DESC(event_log_level, "Asynchronous event logging level- range is: -2(CLASS_DEBUG) to 4(CLASS_DEAD), Default: 2(CLASS_CRITICAL)"); +unsigned int enable_sdev_max_qd; 
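/*
 * The megaraid_mm hunks above delete a compat handler that merely forwarded
 * to the native ioctl and plug in the generic compat_ptr_ioctl() instead,
 * which applies compat_ptr() to the argument and calls ->unlocked_ioctl.
 * The resulting fops shape; demo_open and demo_unlocked_ioctl are
 * hypothetical stand-ins:
 */
static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open,
	.unlocked_ioctl	= demo_unlocked_ioctl,
	/* Valid only when the native handler already interprets the
	 * argument as a user pointer with identical 32/64-bit layout.
	 */
	.compat_ioctl	= compat_ptr_ioctl,
	.llseek		= noop_llseek,
};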
+module_param(enable_sdev_max_qd, int, 0444); +MODULE_PARM_DESC(enable_sdev_max_qd, "Enable sdev max qd as can_queue. Default: 0"); + MODULE_LICENSE("GPL"); MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux.pdl@broadcom.com"); @@ -195,7 +199,7 @@ static bool support_nvme_encapsulation; static bool support_pci_lane_margining; /* define lock for aen poll */ -spinlock_t poll_aen_lock; +static spinlock_t poll_aen_lock; extern struct dentry *megasas_debugfs_root; extern void megasas_init_debugfs(void); @@ -1095,7 +1099,7 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); - return DCMD_NOT_FIRED; + return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); @@ -1119,19 +1123,19 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, int timeout) { int ret = 0; - cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; + cmd->cmd_status_drv = DCMD_INIT; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); - return DCMD_NOT_FIRED; + return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->int_cmd_wait_q, - cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); + cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); if (!ret) { dev_err(&instance->pdev->dev, "DCMD(opcode: 0x%x) is timed out, func:%s\n", @@ -1140,10 +1144,9 @@ megasas_issue_blocked_cmd(struct megasas_instance *instance, } } else wait_event(instance->int_cmd_wait_q, - cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); + cmd->cmd_status_drv != DCMD_INIT); - return (cmd->cmd_status_drv == MFI_STAT_OK) ? - DCMD_SUCCESS : DCMD_FAILED; + return cmd->cmd_status_drv; } /** @@ -1186,19 +1189,19 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); cmd->sync_cmd = 1; - cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; + cmd->cmd_status_drv = DCMD_INIT; if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { dev_err(&instance->pdev->dev, "Failed from %s %d\n", __func__, __LINE__); - return DCMD_NOT_FIRED; + return DCMD_INIT; } instance->instancet->issue_dcmd(instance, cmd); if (timeout) { ret = wait_event_timeout(instance->abort_cmd_wait_q, - cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS, timeout * HZ); + cmd->cmd_status_drv != DCMD_INIT, timeout * HZ); if (!ret) { opcode = cmd_to_abort->frame->dcmd.opcode; dev_err(&instance->pdev->dev, @@ -1208,13 +1211,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, } } else wait_event(instance->abort_cmd_wait_q, - cmd->cmd_status_drv != MFI_STAT_INVALID_STATUS); + cmd->cmd_status_drv != DCMD_INIT); cmd->sync_cmd = 0; megasas_return_cmd(instance, cmd); - return (cmd->cmd_status_drv == MFI_STAT_OK) ? 
- DCMD_SUCCESS : DCMD_FAILED; + return cmd->cmd_status_drv; } /** @@ -1883,6 +1885,10 @@ void megasas_set_dynamic_target_properties(struct scsi_device *sdev, mr_device_priv_data->is_tm_capable = raid->capability.tmCapable; + + if (!raid->flags.isEPD) + sdev->no_write_same = 1; + } else if (instance->use_seqnum_jbod_fp) { pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) + sdev->id; @@ -1941,25 +1947,19 @@ megasas_set_nvme_device_properties(struct scsi_device *sdev, u32 max_io_size) blk_queue_virt_boundary(sdev->request_queue, mr_nvme_pg_size - 1); } - /* - * megasas_set_static_target_properties - - * Device property set by driver are static and it is not required to be - * updated after OCR. - * - * set io timeout - * set device queue depth - * set nvme device properties. see - megasas_set_nvme_device_properties + * megasas_set_fw_assisted_qd - + * set device queue depth to can_queue + * set device queue depth to fw assisted qd * * @sdev: scsi device * @is_target_prop true, if fw provided target properties. */ -static void megasas_set_static_target_properties(struct scsi_device *sdev, +static void megasas_set_fw_assisted_qd(struct scsi_device *sdev, bool is_target_prop) { u8 interface_type; u32 device_qd = MEGASAS_DEFAULT_CMD_PER_LUN; - u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; u32 tgt_device_qd; struct megasas_instance *instance; struct MR_PRIV_DEVICE *mr_device_priv_data; @@ -1968,11 +1968,6 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev, mr_device_priv_data = sdev->hostdata; interface_type = mr_device_priv_data->interface_type; - /* - * The RAID firmware may require extended timeouts. - */ - blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); - switch (interface_type) { case SAS_PD: device_qd = MEGASAS_SAS_QD; @@ -1990,18 +1985,49 @@ static void megasas_set_static_target_properties(struct scsi_device *sdev, if (tgt_device_qd && (tgt_device_qd <= instance->host->can_queue)) device_qd = tgt_device_qd; + } - /* max_io_size_kb will be set to non zero for - * nvme based vd and syspd. - */ + if (instance->enable_sdev_max_qd && interface_type != UNKNOWN_DRIVE) + device_qd = instance->host->can_queue; + + scsi_change_queue_depth(sdev, device_qd); +} + +/* + * megasas_set_static_target_properties - + * Device property set by driver are static and it is not required to be + * updated after OCR. + * + * set io timeout + * set device queue depth + * set nvme device properties. see - megasas_set_nvme_device_properties + * + * @sdev: scsi device + * @is_target_prop true, if fw provided target properties. + */ +static void megasas_set_static_target_properties(struct scsi_device *sdev, + bool is_target_prop) +{ + u32 max_io_size_kb = MR_DEFAULT_NVME_MDTS_KB; + struct megasas_instance *instance; + + instance = megasas_lookup_instance(sdev->host->host_no); + + /* + * The RAID firmware may require extended timeouts. + */ + blk_queue_rq_timeout(sdev->request_queue, scmd_timeout * HZ); + + /* max_io_size_kb will be set to non zero for + * nvme based vd and syspd. 
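/*
 * The megasas hunks above make cmd_status_drv carry a DCMD_* state of its
 * own: the issuer seeds it with the DCMD_INIT sentinel (0xff, a value no
 * completion path ever writes) and sleeps until the completion overwrites
 * it, after which the field can be returned directly with no MFI_STAT_OK
 * comparison. The shape of the wait, condensed from the hunks with error
 * logging elided; the function name is hypothetical:
 */
static int demo_issue_blocked(struct megasas_instance *instance,
			      struct megasas_cmd *cmd, int timeout)
{
	int ret;

	cmd->cmd_status_drv = DCMD_INIT;	/* sentinel: not completed */
	instance->instancet->issue_dcmd(instance, cmd);

	/* The ISR-side completion stores DCMD_SUCCESS or DCMD_FAILED;
	 * on timeout the sentinel is still in place.
	 */
	ret = wait_event_timeout(instance->int_cmd_wait_q,
				 cmd->cmd_status_drv != DCMD_INIT,
				 timeout * HZ);
	return ret ? cmd->cmd_status_drv : DCMD_TIMEOUT;
}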
+ */ + if (is_target_prop) max_io_size_kb = le32_to_cpu(instance->tgt_prop->max_io_size_kb); - } if (instance->nvme_page_size && max_io_size_kb) megasas_set_nvme_device_properties(sdev, (max_io_size_kb << 10)); - scsi_change_queue_depth(sdev, device_qd); - + megasas_set_fw_assisted_qd(sdev, is_target_prop); } @@ -2126,6 +2152,12 @@ static void megasas_complete_outstanding_ioctls(struct megasas_instance *instanc void megaraid_sas_kill_hba(struct megasas_instance *instance) { + if (atomic_read(&instance->adprecovery) == MEGASAS_HW_CRITICAL_ERROR) { + dev_warn(&instance->pdev->dev, + "Adapter already dead, skipping kill HBA\n"); + return; + } + /* Set critical error to block I/O & ioctls in case caller didn't */ atomic_set(&instance->adprecovery, MEGASAS_HW_CRITICAL_ERROR); /* Wait 1 second to ensure IO or ioctls in build have posted */ @@ -2702,7 +2734,7 @@ static int megasas_wait_for_outstanding(struct megasas_instance *instance) "reset queue\n", reset_cmd); - reset_cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; + reset_cmd->cmd_status_drv = DCMD_INIT; instance->instancet->fire_cmd(instance, reset_cmd->frame_phys_addr, 0, instance->reg_set); @@ -3285,6 +3317,48 @@ fw_cmds_outstanding_show(struct device *cdev, } static ssize_t +enable_sdev_max_qd_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; + + return snprintf(buf, PAGE_SIZE, "%d\n", instance->enable_sdev_max_qd); +} + +static ssize_t +enable_sdev_max_qd_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct megasas_instance *instance = (struct megasas_instance *)shost->hostdata; + u32 val = 0; + bool is_target_prop; + int ret_target_prop = DCMD_FAILED; + struct scsi_device *sdev; + + if (kstrtou32(buf, 0, &val) != 0) { + pr_err("megasas: could not set enable_sdev_max_qd\n"); + return -EINVAL; + } + + mutex_lock(&instance->reset_mutex); + if (val) + instance->enable_sdev_max_qd = true; + else + instance->enable_sdev_max_qd = false; + + shost_for_each_device(sdev, shost) { + ret_target_prop = megasas_get_target_prop(instance, sdev); + is_target_prop = (ret_target_prop == DCMD_SUCCESS) ? 
true : false; + megasas_set_fw_assisted_qd(sdev, is_target_prop); + } + mutex_unlock(&instance->reset_mutex); + + return strlen(buf); +} + +static ssize_t dump_system_regs_show(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -3313,6 +3387,7 @@ static DEVICE_ATTR_RW(fw_crash_state); static DEVICE_ATTR_RO(page_size); static DEVICE_ATTR_RO(ldio_outstanding); static DEVICE_ATTR_RO(fw_cmds_outstanding); +static DEVICE_ATTR_RW(enable_sdev_max_qd); static DEVICE_ATTR_RO(dump_system_regs); static DEVICE_ATTR_RO(raid_map_id); @@ -3323,6 +3398,7 @@ static struct device_attribute *megaraid_host_attrs[] = { &dev_attr_page_size, &dev_attr_ldio_outstanding, &dev_attr_fw_cmds_outstanding, + &dev_attr_enable_sdev_max_qd, &dev_attr_dump_system_regs, &dev_attr_raid_map_id, NULL, @@ -3348,7 +3424,6 @@ static struct scsi_host_template megasas_template = { .bios_param = megasas_bios_param, .change_queue_depth = scsi_change_queue_depth, .max_segment_size = 0xffffffff, - .no_write_same = 1, }; /** @@ -3364,7 +3439,11 @@ static void megasas_complete_int_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { - cmd->cmd_status_drv = cmd->frame->io.cmd_status; + if (cmd->cmd_status_drv == DCMD_INIT) + cmd->cmd_status_drv = + (cmd->frame->io.cmd_status == MFI_STAT_OK) ? + DCMD_SUCCESS : DCMD_FAILED; + wake_up(&instance->int_cmd_wait_q); } @@ -3383,7 +3462,7 @@ megasas_complete_abort(struct megasas_instance *instance, { if (cmd->sync_cmd) { cmd->sync_cmd = 0; - cmd->cmd_status_drv = 0; + cmd->cmd_status_drv = DCMD_SUCCESS; wake_up(&instance->abort_cmd_wait_q); } } @@ -3659,7 +3738,7 @@ megasas_issue_pending_cmds_again(struct megasas_instance *instance) dev_notice(&instance->pdev->dev, "%p synchronous cmd" "on the internal reset queue," "issue it again.\n", cmd); - cmd->cmd_status_drv = MFI_STAT_INVALID_STATUS; + cmd->cmd_status_drv = DCMD_INIT; instance->instancet->fire_cmd(instance, cmd->frame_phys_addr, 0, instance->reg_set); @@ -4324,7 +4403,8 @@ dcmd_timeout_ocr_possible(struct megasas_instance *instance) { if (instance->adapter_type == MFI_SERIES) return KILL_ADAPTER; else if (instance->unload || - test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) + test_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, + &instance->reset_flags)) return IGNORE_TIMEOUT; else return INITIATE_OCR; @@ -5478,9 +5558,11 @@ megasas_setup_irqs_ioapic(struct megasas_instance *instance) pdev = instance->pdev; instance->irq_context[0].instance = instance; instance->irq_context[0].MSIxIndex = 0; + snprintf(instance->irq_context->name, MEGASAS_MSIX_NAME_LEN, "%s%u", + "megasas", instance->host->host_no); if (request_irq(pci_irq_vector(pdev, 0), instance->instancet->service_isr, IRQF_SHARED, - "megasas", &instance->irq_context[0])) { + instance->irq_context->name, &instance->irq_context[0])) { dev_err(&instance->pdev->dev, "Failed to register IRQ from %s %d\n", __func__, __LINE__); @@ -5512,8 +5594,10 @@ megasas_setup_irqs_msix(struct megasas_instance *instance, u8 is_probe) for (i = 0; i < instance->msix_vectors; i++) { instance->irq_context[i].instance = instance; instance->irq_context[i].MSIxIndex = i; + snprintf(instance->irq_context[i].name, MEGASAS_MSIX_NAME_LEN, "%s%u-msix%u", + "megasas", instance->host->host_no, i); if (request_irq(pci_irq_vector(pdev, i), - instance->instancet->service_isr, 0, "megasas", + instance->instancet->service_isr, 0, instance->irq_context[i].name, &instance->irq_context[i])) { dev_err(&instance->pdev->dev, "Failed to register IRQ for vector %d.\n", i); @@ -5803,7 +5887,7 @@ static 
int megasas_init_fw(struct megasas_instance *instance) } base_addr = pci_resource_start(instance->pdev, instance->bar); - instance->reg_set = ioremap_nocache(base_addr, 8192); + instance->reg_set = ioremap(base_addr, 8192); if (!instance->reg_set) { dev_printk(KERN_DEBUG, &instance->pdev->dev, "Failed to map IO mem\n"); @@ -5894,6 +5978,8 @@ static int megasas_init_fw(struct megasas_instance *instance) MR_MAX_RAID_MAP_SIZE_MASK); } + instance->enable_sdev_max_qd = enable_sdev_max_qd; + switch (instance->adapter_type) { case VENTURA_SERIES: fusion->pcie_bw_limitation = true; @@ -7518,7 +7604,7 @@ megasas_resume(struct pci_dev *pdev) int rval; struct Scsi_Host *host; struct megasas_instance *instance; - int irq_flags = PCI_IRQ_LEGACY; + u32 status_reg; instance = pci_get_drvdata(pdev); @@ -7546,9 +7632,35 @@ megasas_resume(struct pci_dev *pdev) /* * We expect the FW state to be READY */ - if (megasas_transition_to_ready(instance, 0)) - goto fail_ready_state; + if (megasas_transition_to_ready(instance, 0)) { + dev_info(&instance->pdev->dev, + "Failed to transition controller to ready from %s!\n", + __func__); + if (instance->adapter_type != MFI_SERIES) { + status_reg = + instance->instancet->read_fw_status_reg(instance); + if (!(status_reg & MFI_RESET_ADAPTER) || + ((megasas_adp_reset_wait_for_ready + (instance, true, 0)) == FAILED)) + goto fail_ready_state; + } else { + atomic_set(&instance->fw_reset_no_pci_access, 1); + instance->instancet->adp_reset + (instance, instance->reg_set); + atomic_set(&instance->fw_reset_no_pci_access, 0); + + /* waiting for about 30 seconds before retry */ + ssleep(30); + + if (megasas_transition_to_ready(instance, 0)) + goto fail_ready_state; + } + + dev_info(&instance->pdev->dev, + "FW restarted successfully from %s!\n", + __func__); + } if (megasas_set_dma_mask(instance)) goto fail_set_dma_mask; @@ -7560,16 +7672,15 @@ megasas_resume(struct pci_dev *pdev) atomic_set(&instance->ldio_outstanding, 0); /* Now re-enable MSI-X */ - if (instance->msix_vectors) { - irq_flags = PCI_IRQ_MSIX; - if (instance->smp_affinity_enable) - irq_flags |= PCI_IRQ_AFFINITY; + if (instance->msix_vectors) + megasas_alloc_irq_vectors(instance); + + if (!instance->msix_vectors) { + rval = pci_alloc_irq_vectors(instance->pdev, 1, 1, + PCI_IRQ_LEGACY); + if (rval < 0) + goto fail_reenable_msix; } - rval = pci_alloc_irq_vectors(instance->pdev, 1, - instance->msix_vectors ? 
- instance->msix_vectors : 1, irq_flags); - if (rval < 0) - goto fail_reenable_msix; megasas_setup_reply_map(instance); @@ -7962,6 +8073,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, dma_addr_t sense_handle; unsigned long *sense_ptr; u32 opcode = 0; + int ret = DCMD_SUCCESS; memset(kbuff_arr, 0, sizeof(kbuff_arr)); @@ -8102,13 +8214,18 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, * cmd to the SCSI mid-layer */ cmd->sync_cmd = 1; - if (megasas_issue_blocked_cmd(instance, cmd, 0) == DCMD_NOT_FIRED) { + + ret = megasas_issue_blocked_cmd(instance, cmd, 0); + switch (ret) { + case DCMD_INIT: + case DCMD_BUSY: cmd->sync_cmd = 0; dev_err(&instance->pdev->dev, "return -EBUSY from %s %d cmd 0x%x opcode 0x%x cmd->cmd_status_drv 0x%x\n", - __func__, __LINE__, cmd->frame->hdr.cmd, opcode, - cmd->cmd_status_drv); - return -EBUSY; + __func__, __LINE__, cmd->frame->hdr.cmd, opcode, + cmd->cmd_status_drv); + error = -EBUSY; + goto out; } cmd->sync_cmd = 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 50b8c1b12767..89c3685f5163 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -386,9 +386,8 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart), le32_to_cpu(quad->diff))) == 0) { if (span_blk != NULL) { - u64 blk, debugBlk; + u64 blk; blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); - debugBlk = blk; blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; *span_blk = blk; @@ -699,9 +698,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, __le16 *pDevHandle = &io_info->devHandle; u8 *pPdInterface = &io_info->pd_interface; u32 logArm, rowMod, armQ, arm; - struct fusion_context *fusion; - fusion = instance->ctrl_context; *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); /*Get row and span from io_info for Uneven Span IO.*/ @@ -801,9 +798,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, u64 *pdBlock = &io_info->pdBlock; __le16 *pDevHandle = &io_info->devHandle; u8 *pPdInterface = &io_info->pd_interface; - struct fusion_context *fusion; - fusion = instance->ctrl_context; *pDevHandle = cpu_to_le16(MR_DEVHANDLE_INVALID); row = mega_div64_32(stripRow, raid->rowDataSize); diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 120e3c4de8c2..f3b36fd0a0eb 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -323,9 +323,6 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c { u16 cur_max_fw_cmds = 0; u16 ldio_threshold = 0; - struct megasas_register_set __iomem *reg_set; - - reg_set = instance->reg_set; /* ventura FW does not fill outbound_scratch_pad_2 with queue depth */ if (instance->adapter_type < VENTURA_SERIES) @@ -367,6 +364,35 @@ megasas_fusion_update_can_queue(struct megasas_instance *instance, int fw_boot_c instance->max_fw_cmds = instance->max_fw_cmds-1; } } + +static inline void +megasas_get_msix_index(struct megasas_instance *instance, + struct scsi_cmnd *scmd, + struct megasas_cmd_fusion *cmd, + u8 data_arms) +{ + int sdev_busy; + + /* nr_hw_queue = 1 for MegaRAID */ + struct blk_mq_hw_ctx *hctx = + scmd->device->request_queue->queue_hw_ctx[0]; + + sdev_busy = atomic_read(&hctx->nr_active); + + if (instance->perf_mode == MR_BALANCED_PERF_MODE && + 
sdev_busy > (data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) + cmd->request_desc->SCSIIO.MSIxIndex = + mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / + MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); + else if (instance->msix_load_balance) + cmd->request_desc->SCSIIO.MSIxIndex = + (mega_mod64(atomic64_add_return(1, &instance->total_io_count), + instance->msix_vectors)); + else + cmd->request_desc->SCSIIO.MSIxIndex = + instance->reply_map[raw_smp_processor_id()]; +} + /** * megasas_free_cmds_fusion - Free all the cmds in the free cmd pool * @instance: Adapter soft state @@ -1315,7 +1341,9 @@ megasas_sync_pd_seq_num(struct megasas_instance *instance, bool pend) { } if (ret == DCMD_TIMEOUT) - megaraid_sas_kill_hba(instance); + dev_warn(&instance->pdev->dev, + "%s DCMD timed out, continue without JBOD sequence map\n", + __func__); if (ret == DCMD_SUCCESS) instance->pd_seq_map_id++; @@ -1397,7 +1425,9 @@ megasas_get_ld_map_info(struct megasas_instance *instance) ret = megasas_issue_polled(instance, cmd); if (ret == DCMD_TIMEOUT) - megaraid_sas_kill_hba(instance); + dev_warn(&instance->pdev->dev, + "%s DCMD timed out, RAID map is disabled\n", + __func__); megasas_return_cmd(instance, cmd); @@ -2828,19 +2858,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, fp_possible = (io_info.fpOkForIo > 0) ? true : false; } - if ((instance->perf_mode == MR_BALANCED_PERF_MODE) && - atomic_read(&scp->device->device_busy) > - (io_info.data_arms * MR_DEVICE_HIGH_IOPS_DEPTH)) - cmd->request_desc->SCSIIO.MSIxIndex = - mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / - MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); - else if (instance->msix_load_balance) - cmd->request_desc->SCSIIO.MSIxIndex = - (mega_mod64(atomic64_add_return(1, &instance->total_io_count), - instance->msix_vectors)); - else - cmd->request_desc->SCSIIO.MSIxIndex = - instance->reply_map[raw_smp_processor_id()]; + megasas_get_msix_index(instance, scp, cmd, io_info.data_arms); if (instance->adapter_type >= VENTURA_SERIES) { /* FP for Optimal raid level 1. 
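Editor's note: the hunks above collapse duplicated reply-queue selection code into megasas_get_msix_index() (a later hunk does the same for the system-PD path), with the device's in-flight count now read from the blk-mq hardware context (hctx->nr_active) instead of device_busy. A minimal standalone model of the policy follows; the threshold and batch constants are assumed values since the diff does not show MR_DEVICE_HIGH_IOPS_DEPTH or MR_HIGH_IOPS_BATCH_COUNT, and C11 atomics plus plain % stand in for atomic64_add_return() and mega_mod64().

#include <stdatomic.h>
#include <stdint.h>

#define HIGH_IOPS_DEPTH 8   /* per-arm in-flight threshold, assumed value */
#define HIGH_IOPS_BATCH 16  /* I/Os pinned to one queue per batch, assumed value */

static atomic_uint_fast64_t high_iops_outstanding;
static atomic_uint_fast64_t total_io_count;

/* Pick a reply-queue (MSI-X) index for one I/O. */
static unsigned int pick_msix_index(int balanced_mode, int msix_load_balance,
				    unsigned int sdev_busy, unsigned int data_arms,
				    unsigned int low_latency_queues,
				    unsigned int msix_vectors,
				    unsigned int submitting_cpu_queue)
{
	if (balanced_mode && sdev_busy > data_arms * HIGH_IOPS_DEPTH) {
		/* Keep a batch of consecutive I/Os on one high-IOPS queue,
		 * then round-robin over the first low_latency_queues vectors. */
		uint64_t n = atomic_fetch_add(&high_iops_outstanding, 1) + 1;
		return (unsigned int)((n / HIGH_IOPS_BATCH) % low_latency_queues);
	}
	if (msix_load_balance) {
		/* Spread every I/O round-robin over all vectors. */
		uint64_t n = atomic_fetch_add(&total_io_count, 1) + 1;
		return (unsigned int)(n % msix_vectors);
	}
	/* Default: stay on the queue mapped to the submitting CPU. */
	return submitting_cpu_queue;
}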
@@ -3161,18 +3179,7 @@ megasas_build_syspd_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.DevHandle = io_request->DevHandle; - if ((instance->perf_mode == MR_BALANCED_PERF_MODE) && - atomic_read(&scmd->device->device_busy) > MR_DEVICE_HIGH_IOPS_DEPTH) - cmd->request_desc->SCSIIO.MSIxIndex = - mega_mod64((atomic64_add_return(1, &instance->high_iops_outstanding) / - MR_HIGH_IOPS_BATCH_COUNT), instance->low_latency_index_start); - else if (instance->msix_load_balance) - cmd->request_desc->SCSIIO.MSIxIndex = - (mega_mod64(atomic64_add_return(1, &instance->total_io_count), - instance->msix_vectors)); - else - cmd->request_desc->SCSIIO.MSIxIndex = - instance->reply_map[raw_smp_processor_id()]; + megasas_get_msix_index(instance, scmd, cmd, 1); if (!fp_possible) { /* system pd firmware path */ @@ -3511,7 +3518,7 @@ megasas_complete_r1_command(struct megasas_instance *instance, * @instance: Adapter soft state * Completes all commands that is in reply descriptor queue */ -int +static int complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex, struct megasas_irq_context *irq_context) { @@ -3702,7 +3709,7 @@ static void megasas_enable_irq_poll(struct megasas_instance *instance) * megasas_sync_irqs - Synchronizes all IRQs owned by adapter * @instance: Adapter soft state */ -void megasas_sync_irqs(unsigned long instance_addr) +static void megasas_sync_irqs(unsigned long instance_addr) { u32 count, i; struct megasas_instance *instance = @@ -3760,7 +3767,7 @@ int megasas_irqpoll(struct irq_poll *irqpoll, int budget) * * Tasklet to complete cmds */ -void +static void megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) { struct megasas_instance *instance = @@ -3780,7 +3787,7 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) /** * megasas_isr_fusion - isr entry point */ -irqreturn_t megasas_isr_fusion(int irq, void *devp) +static irqreturn_t megasas_isr_fusion(int irq, void *devp) { struct megasas_irq_context *irq_context = devp; struct megasas_instance *instance = irq_context->instance; @@ -3816,7 +3823,7 @@ irqreturn_t megasas_isr_fusion(int irq, void *devp) * mfi_cmd: megasas_cmd pointer * */ -void +static void build_mpt_mfi_pass_thru(struct megasas_instance *instance, struct megasas_cmd *mfi_cmd) { @@ -3874,7 +3881,7 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, * @cmd: mfi cmd to build * */ -union MEGASAS_REQUEST_DESCRIPTOR_UNION * +static union MEGASAS_REQUEST_DESCRIPTOR_UNION * build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) { union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc = NULL; @@ -3900,7 +3907,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) * @cmd: mfi cmd pointer * */ -void +static void megasas_issue_dcmd_fusion(struct megasas_instance *instance, struct megasas_cmd *cmd) { @@ -4096,8 +4103,9 @@ static inline void megasas_trigger_snap_dump(struct megasas_instance *instance) } /* This function waits for outstanding commands on fusion to complete */ -int megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, - int reason, int *convert) +static int +megasas_wait_for_outstanding_fusion(struct megasas_instance *instance, + int reason, int *convert) { int i, outstanding, retval = 0, hb_seconds_missed = 0; u32 fw_state, abs_state; @@ -4221,7 +4229,8 @@ void megasas_reset_reply_desc(struct megasas_instance *instance) * megasas_refire_mgmt_cmd : Re-fire management commands * @instance: Controller's soft instance */ -void megasas_refire_mgmt_cmd(struct 
megasas_instance *instance) +void megasas_refire_mgmt_cmd(struct megasas_instance *instance, + bool return_ioctl) { int j; struct megasas_cmd_fusion *cmd_fusion; @@ -4285,6 +4294,16 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance) break; } + if (return_ioctl && cmd_mfi->sync_cmd && + cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { + dev_err(&instance->pdev->dev, + "return -EBUSY from %s %d cmd 0x%x opcode 0x%x\n", + __func__, __LINE__, cmd_mfi->frame->hdr.cmd, + le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); + cmd_mfi->cmd_status_drv = DCMD_BUSY; + result = COMPLETE_CMD; + } + switch (result) { case REFIRE_CMD: megasas_fire_cmd_fusion(instance, req_desc); @@ -4300,6 +4319,37 @@ void megasas_refire_mgmt_cmd(struct megasas_instance *instance) } /* + * megasas_return_polled_cmds: Return polled mode commands back to the pool + * before initiating an OCR. + * @instance: Controller's soft instance + */ +static void +megasas_return_polled_cmds(struct megasas_instance *instance) +{ + int i; + struct megasas_cmd_fusion *cmd_fusion; + struct fusion_context *fusion; + struct megasas_cmd *cmd_mfi; + + fusion = instance->ctrl_context; + + for (i = instance->max_scsi_cmds; i < instance->max_fw_cmds; i++) { + cmd_fusion = fusion->cmd_list[i]; + cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx]; + + if (cmd_mfi->flags & DRV_DCMD_POLLED_MODE) { + if (megasas_dbg_lvl & OCR_DEBUG) + dev_info(&instance->pdev->dev, + "%s %d return cmd 0x%x opcode 0x%x\n", + __func__, __LINE__, cmd_mfi->frame->hdr.cmd, + le32_to_cpu(cmd_mfi->frame->dcmd.opcode)); + cmd_mfi->flags &= ~DRV_DCMD_POLLED_MODE; + megasas_return_cmd(instance, cmd_mfi); + } + } +} + +/* * megasas_track_scsiio : Track SCSI IOs outstanding to a SCSI device * @instance: per adapter struct * @channel: the channel assigned by the OS @@ -4747,7 +4797,8 @@ out: } /*SRIOV get other instance in cluster if any*/ -struct megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) +static struct +megasas_instance *megasas_get_peer_instance(struct megasas_instance *instance) { int i; @@ -4848,6 +4899,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) if (instance->requestorId && !instance->skip_heartbeat_timer_del) del_timer_sync(&instance->sriov_heartbeat_timer); set_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); + set_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); atomic_set(&instance->adprecovery, MEGASAS_ADPRESET_SM_POLLING); instance->instancet->disable_intr(instance); megasas_sync_irqs((unsigned long)instance); @@ -4952,7 +5004,9 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) goto kill_hba; } - megasas_refire_mgmt_cmd(instance); + megasas_refire_mgmt_cmd(instance, + (i == (MEGASAS_FUSION_MAX_RESET_TRIES - 1) + ? 
1 : 0)); /* Reset load balance info */ if (fusion->load_balance_info) @@ -4960,8 +5014,16 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) (sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT)); - if (!megasas_get_map_info(instance)) + if (!megasas_get_map_info(instance)) { megasas_sync_map_info(instance); + } else { + /* + * Return pending polled mode cmds before + * retrying OCR + */ + megasas_return_polled_cmds(instance); + continue; + } megasas_setup_jbod_map(instance); @@ -4988,6 +5050,15 @@ int megasas_reset_fusion(struct Scsi_Host *shost, int reason) megasas_set_dynamic_target_properties(sdev, is_target_prop); } + status_reg = instance->instancet->read_fw_status_reg + (instance); + abs_state = status_reg & MFI_STATE_MASK; + if (abs_state != MFI_STATE_OPERATIONAL) { + dev_info(&instance->pdev->dev, + "Adapter is not OPERATIONAL, state 0x%x for scsi:%d\n", + abs_state, instance->host->host_no); + goto out; + } atomic_set(&instance->adprecovery, MEGASAS_HBA_OPERATIONAL); dev_info(&instance->pdev->dev, @@ -5047,13 +5118,13 @@ kill_hba: instance->skip_heartbeat_timer_del = 1; retval = FAILED; out: - clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); + clear_bit(MEGASAS_FUSION_OCR_NOT_POSSIBLE, &instance->reset_flags); mutex_unlock(&instance->reset_mutex); return retval; } /* Fusion Crash dump collection */ -void megasas_fusion_crash_dump(struct megasas_instance *instance) +static void megasas_fusion_crash_dump(struct megasas_instance *instance) { u32 status_reg; u8 partial_copy = 0; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index c013c80fe4e6..d57ecc7f88d8 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -89,6 +89,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { #define MEGASAS_FP_CMD_LEN 16 #define MEGASAS_FUSION_IN_RESET 0 +#define MEGASAS_FUSION_OCR_NOT_POSSIBLE 1 #define RAID_1_PEER_CMDS 2 #define JBOD_MAPS_COUNT 2 #define MEGASAS_REDUCE_QD_COUNT 64 @@ -864,9 +865,20 @@ struct MR_LD_RAID { u8 regTypeReqOnRead; __le16 seqNum; - struct { - u32 ldSyncRequired:1; - u32 reserved:31; +struct { +#ifndef MFI_BIG_ENDIAN + u32 ldSyncRequired:1; + u32 regTypeReqOnReadIsValid:1; + u32 isEPD:1; + u32 enableSLDOnAllRWIOs:1; + u32 reserved:28; +#else + u32 reserved:28; + u32 enableSLDOnAllRWIOs:1; + u32 isEPD:1; + u32 regTypeReqOnReadIsValid:1; + u32 ldSyncRequired:1; +#endif } flags; u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h index 7efd17a3c25b..ed3923f8db4f 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2.h @@ -9,7 +9,7 @@ * scatter/gather formats. * Creation Date: June 21, 2006 * - * mpi2.h Version: 02.00.53 + * mpi2.h Version: 02.00.54 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -121,6 +121,10 @@ * 08-15-18 02.00.52 Bumped MPI2_HEADER_VERSION_UNIT. * 08-28-18 02.00.53 Bumped MPI2_HEADER_VERSION_UNIT. 
* Added MPI2_IOCSTATUS_FAILURE + * 12-17-18 02.00.54 Bumped MPI2_HEADER_VERSION_UNIT + * 06-24-19 02.00.55 Bumped MPI2_HEADER_VERSION_UNIT + * 08-01-19 02.00.56 Bumped MPI2_HEADER_VERSION_UNIT + * 10-02-19 02.00.57 Bumped MPI2_HEADER_VERSION_UNIT * -------------------------------------------------------------------------- */ @@ -161,7 +165,7 @@ /* Unit and Dev versioning for this MPI header set */ -#define MPI2_HEADER_VERSION_UNIT (0x35) +#define MPI2_HEADER_VERSION_UNIT (0x39) #define MPI2_HEADER_VERSION_DEV (0x00) #define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00) #define MPI2_HEADER_VERSION_UNIT_SHIFT (8) @@ -180,6 +184,7 @@ #define MPI2_IOC_STATE_READY (0x10000000) #define MPI2_IOC_STATE_OPERATIONAL (0x20000000) #define MPI2_IOC_STATE_FAULT (0x40000000) +#define MPI2_IOC_STATE_COREDUMP (0x50000000) #define MPI2_IOC_STATE_MASK (0xF0000000) #define MPI2_IOC_STATE_SHIFT (28) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h index 167d79d145ca..43a3bf8ff428 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h @@ -7,7 +7,7 @@ * Title: MPI Configuration messages and pages * Creation Date: November 10, 2006 * - * mpi2_cnfg.h Version: 02.00.46 + * mpi2_cnfg.h Version: 02.00.47 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -244,6 +244,13 @@ * Added DMDReport Delay Time defines to * PCIeIOUnitPage1 * -------------------------------------------------------------------------- + * 08-02-18 02.00.44 Added Slotx2, Slotx4 to ManPage 7. + * 08-15-18 02.00.45 Added ProductSpecific field at end of IOC Page 1 + * 08-28-18 02.00.46 Added NVMs Write Cache flag to IOUnitPage1 + * Added DMDReport Delay Time defines to PCIeIOUnitPage1 + * 12-17-18 02.00.47 Swap locations of Slotx2 and Slotx4 in ManPage 7. 
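Editor's note: the MPI2_IOC_STATE_COREDUMP value added in the mpi2.h hunk above slots into the existing 4-bit state field of the doorbell register; the low bits are payload. A small sketch of the decode, using the state values from that hunk; the 16-bit payload mask matches how the driver hunks later in this patch apply MPI2_DOORBELL_DATA_MASK, but its exact value is an assumption here.

#include <stdint.h>
#include <stdio.h>

#define IOC_STATE_MASK        0xF0000000u
#define IOC_STATE_READY       0x10000000u
#define IOC_STATE_OPERATIONAL 0x20000000u
#define IOC_STATE_FAULT       0x40000000u
#define IOC_STATE_COREDUMP    0x50000000u /* new in this patch */
#define DOORBELL_DATA_MASK    0x0000FFFFu /* assumed payload mask */

static const char *ioc_state_name(uint32_t doorbell)
{
	switch (doorbell & IOC_STATE_MASK) {
	case IOC_STATE_READY:       return "ready";
	case IOC_STATE_OPERATIONAL: return "operational";
	case IOC_STATE_FAULT:       return "fault";
	case IOC_STATE_COREDUMP:    return "coredump";
	default:                    return "transitional";
	}
}

int main(void)
{
	uint32_t doorbell = 0x50001234; /* example: coredump, payload 0x1234 */

	printf("state=%s data=0x%04x\n", ioc_state_name(doorbell),
	       (unsigned int)(doorbell & DOORBELL_DATA_MASK));
	return 0;
}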
+ * 08-01-19 02.00.49 Add MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID + * Add MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT */ #ifndef MPI2_CNFG_H @@ -810,7 +817,8 @@ typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO { U8 Location; /*0x14 */ U8 ReceptacleID; /*0x15 */ U16 Slot; /*0x16 */ - U32 Reserved2; /*0x18 */ + U16 Slotx2; /*0x18 */ + U16 Slotx4; /*0x1A */ } MPI2_MANPAGE7_CONNECTOR_INFO, *PTR_MPI2_MANPAGE7_CONNECTOR_INFO, Mpi2ManPage7ConnectorInfo_t, @@ -885,6 +893,8 @@ typedef struct _MPI2_CONFIG_PAGE_MAN_7 { #define MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER (0x00000002) #define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001) +#define MPI26_MANPAGE7_FLAG_CONN_LANE_USE_PINOUT (0x00000020) +#define MPI26_MANPAGE7_FLAG_X2_X4_SLOT_INFO_VALID (0x00000010) /* *Generic structure to use for product-specific manufacturing pages @@ -956,9 +966,10 @@ typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 { /* IO Unit Page 1 Flags defines */ #define MPI26_IOUNITPAGE1_NVME_WRCACHE_MASK (0x00030000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00000000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00010000) -#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00020000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT (16) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_NO_CHANGE (0x00000000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_ENABLE (0x00010000) +#define MPI26_IOUNITPAGE1_NVME_WRCACHE_DISABLE (0x00020000) #define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK (0x00004000) #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000) #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000) @@ -3925,7 +3936,13 @@ typedef struct _MPI26_CONFIG_PAGE_PCIEDEV_2 { U32 MaximumDataTransferSize; /*0x0C */ U32 Capabilities; /*0x10 */ U16 NOIOB; /* 0x14 */ - U16 Reserved2; /* 0x16 */ + U16 ShutdownLatency; /* 0x16 */ + U16 VendorID; /* 0x18 */ + U16 DeviceID; /* 0x1A */ + U16 SubsystemVendorID; /* 0x1C */ + U16 SubsystemID; /* 0x1E */ + U8 RevisionID; /* 0x20 */ + U8 Reserved21[3]; /* 0x21 */ } MPI26_CONFIG_PAGE_PCIEDEV_2, *PTR_MPI26_CONFIG_PAGE_PCIEDEV_2, Mpi26PCIeDevicePage2_t, *pMpi26PCIeDevicePage2_t; diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_image.h b/drivers/scsi/mpt3sas/mpi/mpi2_image.h index 4959585f029d..33b9c3a6fd40 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_image.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_image.h @@ -5,7 +5,7 @@ * Name: mpi2_image.h * Description: Contains definitions for firmware and other component images * Creation Date: 04/02/2018 - * Version: 02.06.03 + * Version: 02.06.04 * * * Version History @@ -17,6 +17,12 @@ * 08-14-18 02.06.01 Corrected define for MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 * 08-28-18 02.06.02 Added MPI2_EXT_IMAGE_TYPE_RDE * 09-07-18 02.06.03 Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES + * 12-17-18 02.06.04 Added MPI2_EXT_IMAGE_TYPE_PBLP + * Shorten some defines to be compatible with DOS + * 06-24-19 02.06.05 Whitespace adjustments to help with identifier + * checking tool.
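Editor's note: the IO Unit Page 1 hunk above does more than rename; it re-encodes the NVMe write-cache field so that NO_CHANGE is zero and adds MPI26_IOUNITPAGE1_NVME_WRCACHE_SHIFT, letting the field be handled as a 2-bit integer rather than compared as whole-register constants. A sketch of the accessors this enables, with the encodings taken from the hunk; the helper names are illustrative, not driver API.

#include <stdint.h>

#define NVME_WRCACHE_MASK  0x00030000u
#define NVME_WRCACHE_SHIFT 16

enum nvme_wrcache {          /* new encodings from the hunk above */
	WRCACHE_NO_CHANGE = 0,
	WRCACHE_ENABLE    = 1,
	WRCACHE_DISABLE   = 2,
};

static enum nvme_wrcache wrcache_get(uint32_t iounit_flags)
{
	return (enum nvme_wrcache)((iounit_flags & NVME_WRCACHE_MASK) >>
				   NVME_WRCACHE_SHIFT);
}

static uint32_t wrcache_set(uint32_t iounit_flags, enum nvme_wrcache v)
{
	return (iounit_flags & ~NVME_WRCACHE_MASK) |
	       ((uint32_t)v << NVME_WRCACHE_SHIFT);
}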
+ * 10-02-19 02.06.06 Added MPI26_IMAGE_HEADER_SIG1_COREDUMP + * Added MPI2_FLASH_REGION_COREDUMP */ #ifndef MPI2_IMAGE_H #define MPI2_IMAGE_H @@ -200,17 +206,19 @@ typedef struct _MPI26_COMPONENT_IMAGE_HEADER { #define MPI26_IMAGE_HEADER_SIGNATURE0_MPI26 (0xEB000042) /**** Definitions for Signature1 field ****/ -#define MPI26_IMAGE_HEADER_SIGNATURE1_APPLICATION (0x20505041) -#define MPI26_IMAGE_HEADER_SIGNATURE1_CBB (0x20424243) -#define MPI26_IMAGE_HEADER_SIGNATURE1_MFG (0x2047464D) -#define MPI26_IMAGE_HEADER_SIGNATURE1_BIOS (0x534F4942) -#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIM (0x4D494948) -#define MPI26_IMAGE_HEADER_SIGNATURE1_HIIA (0x41494948) -#define MPI26_IMAGE_HEADER_SIGNATURE1_CPLD (0x444C5043) -#define MPI26_IMAGE_HEADER_SIGNATURE1_SPD (0x20445053) -#define MPI26_IMAGE_HEADER_SIGNATURE1_NVDATA (0x5444564E) -#define MPI26_IMAGE_HEADER_SIGNATURE1_GAS_GAUGE (0x20534147) -#define MPI26_IMAGE_HEADER_SIGNATURE1_PBLP (0x50424C50) +#define MPI26_IMAGE_HEADER_SIG1_APPLICATION (0x20505041) +#define MPI26_IMAGE_HEADER_SIG1_CBB (0x20424243) +#define MPI26_IMAGE_HEADER_SIG1_MFG (0x2047464D) +#define MPI26_IMAGE_HEADER_SIG1_BIOS (0x534F4942) +#define MPI26_IMAGE_HEADER_SIG1_HIIM (0x4D494948) +#define MPI26_IMAGE_HEADER_SIG1_HIIA (0x41494948) +#define MPI26_IMAGE_HEADER_SIG1_CPLD (0x444C5043) +#define MPI26_IMAGE_HEADER_SIG1_SPD (0x20445053) +#define MPI26_IMAGE_HEADER_SIG1_NVDATA (0x5444564E) +#define MPI26_IMAGE_HEADER_SIG1_GAS_GAUGE (0x20534147) +#define MPI26_IMAGE_HEADER_SIG1_PBLP (0x504C4250) +/* little-endian "DUMP" */ +#define MPI26_IMAGE_HEADER_SIG1_COREDUMP (0x504D5544) /**** Definitions for Signature2 field ****/ #define MPI26_IMAGE_HEADER_SIGNATURE2_VALUE (0x50584546) @@ -278,6 +286,7 @@ typedef struct _MPI2_EXT_IMAGE_HEADER { #define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08) #define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH (0x09) #define MPI2_EXT_IMAGE_TYPE_RDE (0x0A) +#define MPI2_EXT_IMAGE_TYPE_PBLP (0x0B) #define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80) #define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF) @@ -356,6 +365,7 @@ typedef struct _MPI2_FLASH_LAYOUT_DATA { #define MPI2_FLASH_REGION_MR_NVDATA (0x14) #define MPI2_FLASH_REGION_CPLD (0x15) #define MPI2_FLASH_REGION_PSOC (0x16) +#define MPI2_FLASH_REGION_COREDUMP (0x17) /*ImageRevision */ #define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00) @@ -472,12 +482,12 @@ Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t; #define MPI25_HASH_ALGORITHM_UNUSED (0x00) #define MPI25_HASH_ALGORITHM_SHA256 (0x01) -#define MPI26_HASH_ALGORITHM_VERSION_MASK (0xE0) -#define MPI26_HASH_ALGORITHM_VERSION_NONE (0x00) -#define MPI26_HASH_ALGORITHM_VERSION_SHA1 (0x20) -#define MPI26_HASH_ALGORITHM_VERSION_SHA2 (0x40) -#define MPI26_HASH_ALGORITHM_VERSION_SHA3 (0x60) -#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F) +#define MPI26_HASH_ALGORITHM_VER_MASK (0xE0) +#define MPI26_HASH_ALGORITHM_VER_NONE (0x00) +#define MPI26_HASH_ALGORITHM_VER_SHA1 (0x20) +#define MPI26_HASH_ALGORITHM_VER_SHA2 (0x40) +#define MPI26_HASH_ALGORITHM_VER_SHA3 (0x60) +#define MPI26_HASH_ALGORITHM_SIZE_MASK (0x1F) #define MPI26_HASH_ALGORITHM_SIZE_256 (0x01) #define MPI26_HASH_ALGORITHM_SIZE_512 (0x02) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h index 68ea408cd5c5..e83c7c529dc9 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h @@ -175,6 +175,10 @@ * Moved FW image definitions ionto new mpi2_image,h * 08-14-18 02.00.36 Fixed definition of MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) * 09-07-18 02.00.37 
Added MPI26_EVENT_PCIE_TOPO_PI_16_LANES + * 10-02-19 02.00.38 Added MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE + * Added MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED + * Added MPI2_FW_DOWNLOAD_ITYPE_COREDUMP + * Added MPI2_FW_UPLOAD_ITYPE_COREDUMP * -------------------------------------------------------------------------- */ @@ -248,6 +252,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST { /*ConfigurationFlags */ #define MPI26_IOCINIT_CFGFLAGS_NVME_SGL_FORMAT (0x0001) +#define MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE (0x0002) /*minimum depth for a Reply Descriptor Post Queue */ #define MPI2_RDPQ_DEPTH_MIN (16) @@ -377,6 +382,7 @@ typedef struct _MPI2_IOC_FACTS_REPLY { /*ProductID field uses MPI2_FW_HEADER_PID_ */ /*IOCCapabilities */ +#define MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED (0x00200000) #define MPI26_IOCFACTS_CAPABILITY_PCIE_SRIOV (0x00100000) #define MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ (0x00080000) #define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE (0x00040000) @@ -1458,8 +1464,8 @@ typedef struct _MPI2_FW_DOWNLOAD_REQUEST { /*MPI v2.6 and newer */ #define MPI2_FW_DOWNLOAD_ITYPE_CPLD (0x15) #define MPI2_FW_DOWNLOAD_ITYPE_PSOC (0x16) +#define MPI2_FW_DOWNLOAD_ITYPE_COREDUMP (0x17) #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0) -#define MPI2_FW_DOWNLOAD_ITYPE_TERMINATE (0xFF) /*MPI v2.0 FWDownload TransactionContext Element */ typedef struct _MPI2_FW_DOWNLOAD_TCSGE { diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h index 63a09509d7d1..bb7b79cfa558 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_pci.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_pci.h @@ -6,7 +6,7 @@ * Title: MPI PCIe Attached Devices structures and definitions. * Creation Date: October 9, 2012 * - * mpi2_pci.h Version: 02.00.03 + * mpi2_pci.h Version: 02.00.04 * * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25 * prefix are for use only on MPI v2.5 products, and must not be used @@ -24,6 +24,8 @@ * 07-01-16 02.00.02 Added MPI26_NVME_FLAGS_FORCE_ADMIN_ERR_RESP to * NVME Encapsulated Request. 
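Editor's note: the coredump additions to mpi2_ioc.h above are two halves of a handshake: the host opts in through IOCInit's ConfigurationFlags, and firmware advertises support through an IOCFacts capability bit. A compile-only sketch of how the defines pair up, with values from the hunk; gating the flag on the capability bit is illustrative here, since the _base_send_ioc_init() hunk later in this patch sets it unconditionally.

#include <stdint.h>

#define IOCINIT_CFGFLAGS_COREDUMP_ENABLE 0x0002u     /* MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE */
#define IOCFACTS_CAPABILITY_COREDUMP     0x00200000u /* MPI26_IOCFACTS_CAPABILITY_COREDUMP_ENABLED */

struct ioc_init_req {
	uint16_t configuration_flags; /* trimmed to the one field involved */
};

static void request_coredump_on_fault(struct ioc_init_req *req,
				      uint32_t iocfacts_capabilities)
{
	/* Ask firmware to enter COREDUMP instead of FAULT, if supported. */
	if (iocfacts_capabilities & IOCFACTS_CAPABILITY_COREDUMP)
		req->configuration_flags |= IOCINIT_CFGFLAGS_COREDUMP_ENABLE;
}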
* 07-22-18 02.00.03 Updated flags field for NVME Encapsulated req + * 12-17-18 02.00.04 Added MPI26_PCIE_DEVINFO_SCSI + * Shorten some defines to be compatible with DOS * -------------------------------------------------------------------------- */ @@ -41,7 +43,7 @@ #define MPI26_PCIE_DEVINFO_NO_DEVICE (0x00000000) #define MPI26_PCIE_DEVINFO_PCI_SWITCH (0x00000001) #define MPI26_PCIE_DEVINFO_NVME (0x00000003) - +#define MPI26_PCIE_DEVINFO_SCSI (0x00000004) /**************************************************************************** * NVMe Encapsulated message @@ -75,10 +77,9 @@ typedef struct _MPI26_NVME_ENCAPSULATED_REQUEST { #define MPI26_NVME_FLAGS_SUBMISSIONQ_IO (0x0000) #define MPI26_NVME_FLAGS_SUBMISSIONQ_ADMIN (0x0010) /*Error Response Address Space */ -#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR (0x000C) -#define MPI26_NVME_FLAGS_MASK_ERROR_RSP_ADDR_MASK (0x000C) -#define MPI26_NVME_FLAGS_SYSTEM_RSP_ADDR (0x0000) -#define MPI26_NVME_FLAGS_IOCCTL_RSP_ADDR (0x0008) +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_MASK (0x000C) +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_SYSTEM (0x0000) +#define MPI26_NVME_FLAGS_ERR_RSP_ADDR_IOCTL (0x0008) /* Data Direction*/ #define MPI26_NVME_FLAGS_DATADIRECTION_MASK (0x0003) #define MPI26_NVME_FLAGS_NODATATRANSFER (0x0000) diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h index 3f966b6796b3..17ef7f63b938 100644 --- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h +++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h @@ -7,7 +7,7 @@ * Title: MPI diagnostic tool structures and definitions * Creation Date: March 26, 2007 * - * mpi2_tool.h Version: 02.00.15 + * mpi2_tool.h Version: 02.00.16 * * Version History * --------------- @@ -40,6 +40,7 @@ * Tool Request Message. * 07-22-18 02.00.15 Added defines for new TOOLBOX_PCIE_LANE_MARGINING tool. * Added option for DeviceInfo field in ISTWI tool. + * 12-17-18 02.00.16 Shorten some defines to be compatible with DOS.
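Editor's note: the MPI26_NVME_FLAGS_ERR_RSP_ADDR_* renames above shorten the identifiers but keep the same bit layout: the 16-bit Flags word of an NVMe Encapsulated request still packs the submission-queue type, the error-response address space, and the data direction into disjoint bit fields. A small sketch of composing it from the values in the hunk; the helper is illustrative.

#include <stdint.h>

#define NVME_FLAGS_SUBMISSIONQ_ADMIN  0x0010u /* else I/O queue, 0x0000 */
#define NVME_FLAGS_ERR_RSP_ADDR_IOCTL 0x0008u /* else system memory, 0x0000 */
#define NVME_FLAGS_DATADIRECTION_MASK 0x0003u /* 0x0000 = no data transfer */

static uint16_t nvme_encap_flags(int admin_queue, int err_rsp_in_ioc,
				 uint16_t data_direction)
{
	uint16_t flags = data_direction & NVME_FLAGS_DATADIRECTION_MASK;

	if (admin_queue)
		flags |= NVME_FLAGS_SUBMISSIONQ_ADMIN;
	if (err_rsp_in_ioc)
		flags |= NVME_FLAGS_ERR_RSP_ADDR_IOCTL;
	return flags;
}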
* -------------------------------------------------------------------------- */ @@ -230,11 +231,11 @@ typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST { #define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07) /*MPI26 TOOLBOX Request MsgFlags defines */ -#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_MASK (0x01) +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_MASK (0x01) /*Request uses Man Page 43 device index addressing */ -#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINDEX (0x00) +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INDEX (0x00) /*Request uses Man Page 43 device info struct addressing */ -#define MPI26_TOOLBOX_REQ_MSGFLAGS_ADDRESSING_DEVINFO (0x01) +#define MPI26_TOOL_ISTWI_MSGFLG_ADDR_INFO (0x01) /*Toolbox ISTWI Read Write Tool reply message */ typedef struct _MPI2_TOOLBOX_ISTWI_REPLY { @@ -403,7 +404,7 @@ Mpi2ToolboxTextDisplayRequest_t, */ /*Toolbox Backend Lane Margining Tool request message */ -typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REQUEST { +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REQUEST { U8 Tool; /*0x00 */ U8 Reserved1; /*0x01 */ U8 ChainOffset; /*0x02 */ @@ -434,7 +435,7 @@ typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REQUEST { /*Toolbox Backend Lane Margining Tool reply message */ -typedef struct _MPI26_TOOLBOX_LANE_MARGINING_REPLY { +typedef struct _MPI26_TOOLBOX_LANE_MARGIN_REPLY { U8 Tool; /*0x00 */ U8 Reserved1; /*0x01 */ U8 MsgLength; /*0x02 */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 050c0f029ef9..663782bb790d 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -124,7 +124,14 @@ enum mpt3sas_perf_mode { }; static int +_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, + u32 ioc_state, int timeout); +static int _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc); +static void +_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc); +static void +_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc); /** * mpt3sas_base_check_cmd_timeout - Function @@ -609,7 +616,8 @@ _base_fault_reset_work(struct work_struct *work) spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); - if (ioc->shost_recovery || ioc->pci_error_recovery) + if ((ioc->shost_recovery && (ioc->ioc_coredump_loop == 0)) || + ioc->pci_error_recovery) goto rearm_timer; spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); @@ -656,20 +664,64 @@ _base_fault_reset_work(struct work_struct *work) return; /* don't rearm timer */ } - ioc->non_operational_loop = 0; + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? + ioc->manu_pg11.CoreDumpTOSec : + MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS; + + timeout /= (FAULT_POLLING_INTERVAL/1000); + + if (ioc->ioc_coredump_loop == 0) { + mpt3sas_print_coredump_info(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + /* do not accept any IOs and disable the interrupts */ + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + ioc->shost_recovery = 1; + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + _base_mask_interrupts(ioc); + _base_clear_outstanding_commands(ioc); + } + + ioc_info(ioc, "%s: CoreDump loop %d.", + __func__, ioc->ioc_coredump_loop); + /* Wait until CoreDump completes or times out */ + if (ioc->ioc_coredump_loop++ < timeout) { + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + goto rearm_timer; + } + } + + if (ioc->ioc_coredump_loop) { + if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_COREDUMP) + ioc_err(ioc, "%s: CoreDump completed. 
LoopCount: %d", + __func__, ioc->ioc_coredump_loop); + else + ioc_err(ioc, "%s: CoreDump Timed out. LoopCount: %d", + __func__, ioc->ioc_coredump_loop); + ioc->ioc_coredump_loop = MPT3SAS_COREDUMP_LOOP_DONE; + } + ioc->non_operational_loop = 0; if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) { rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); ioc_warn(ioc, "%s: hard reset: %s\n", __func__, rc == 0 ? "success" : "failed"); doorbell = mpt3sas_base_get_iocstate(ioc, 0); - if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) - mpt3sas_base_fault_info(ioc, doorbell & + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) + mpt3sas_print_coredump_info(ioc, doorbell & MPI2_DOORBELL_DATA_MASK); if (rc && (doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) return; /* don't rearm timer */ } + ioc->ioc_coredump_loop = 0; spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); rearm_timer: @@ -749,6 +801,49 @@ mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code) } /** + * mpt3sas_base_coredump_info - verbose translation of firmware CoreDump state + * @ioc: per adapter object + * @fault_code: fault code + * + * Return nothing. + */ +void +mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code) +{ + ioc_err(ioc, "coredump_state(0x%04x)!\n", fault_code); +} + +/** + * mpt3sas_base_wait_for_coredump_completion - Wait until coredump + * completes or times out + * @ioc: per adapter object + * + * Returns 0 for success, non-zero for failure. + */ +int +mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller) +{ + u8 timeout = (ioc->manu_pg11.CoreDumpTOSec) ? + ioc->manu_pg11.CoreDumpTOSec : + MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS; + + int ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_FAULT, + timeout); + + if (ioc_state) + ioc_err(ioc, + "%s: CoreDump timed out. (ioc_state=0x%x)\n", + caller, ioc_state); + else + ioc_info(ioc, + "%s: CoreDump completed. 
(ioc_state=0x%x)\n", + caller, ioc_state); + + return ioc_state; +} + +/** * mpt3sas_halt_firmware - halt's mpt controller firmware * @ioc: per adapter object * @@ -768,9 +863,14 @@ mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc) dump_stack(); doorbell = ioc->base_readl(&ioc->chip->Doorbell); - if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) - mpt3sas_base_fault_info(ioc , doorbell); - else { + if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, doorbell & + MPI2_DOORBELL_DATA_MASK); + } else { writel(0xC0FFEE00, &ioc->chip->Doorbell); ioc_err(ioc, "Firmware is halted due to command timeout\n"); } @@ -2260,6 +2360,11 @@ base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc, bool build_prp = true; data_length = scsi_bufflen(scmd); + if (pcie_device && + (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) { + build_prp = false; + return build_prp; + } /* If Datalenth is <= 16K and number of SGE’s entries are <= 2 * we built IEEE SGL @@ -3039,11 +3144,11 @@ _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc) descp = NULL; ioc_info(ioc, " %d %d\n", ioc->high_iops_queues, - ioc->msix_vector_count); + ioc->reply_queue_count); i = pci_alloc_irq_vectors_affinity(ioc->pdev, ioc->high_iops_queues, - ioc->msix_vector_count, irq_flags, descp); + ioc->reply_queue_count, irq_flags, descp); return i; } @@ -3098,6 +3203,8 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) */ if (!ioc->combined_reply_queue && ioc->hba_mpi_version_belonged != MPI2_VERSION) { + ioc_info(ioc, + "combined ReplyQueue is off, Enabling msix load balance\n"); ioc->msix_load_balance = true; } @@ -3110,9 +3217,7 @@ _base_enable_msix(struct MPT3SAS_ADAPTER *ioc) r = _base_alloc_irq_vectors(ioc); if (r < 0) { - dfailprintk(ioc, - ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", - r)); + ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n", r); goto try_ioapic; } @@ -3178,6 +3283,43 @@ mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc) } } +static int +_base_diag_reset(struct MPT3SAS_ADAPTER *ioc); + +/** + * _base_check_for_fault_and_issue_reset - check if IOC is in fault state + * and if it is in fault state then issue diag reset. + * @ioc: per adapter object + * + * Returns: 0 for success, non-zero for failure. 
+ */ +static int +_base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc) +{ + u32 ioc_state; + int rc = -EFAULT; + + dinitprintk(ioc, pr_info("%s\n", __func__)); + if (ioc->pci_error_recovery) + return 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state)); + + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = _base_diag_reset(ioc); + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, __func__); + rc = _base_diag_reset(ioc); + } + + return rc; +} + /** * mpt3sas_base_map_resources - map in controller resources (io/irq/memap) * @ioc: per adapter object @@ -3190,7 +3332,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) struct pci_dev *pdev = ioc->pdev; u32 memap_sz; u32 pio_sz; - int i, r = 0; + int i, r = 0, rc; u64 pio_chip = 0; phys_addr_t chip_phys = 0; struct adapter_reply_queue *reply_q; @@ -3243,7 +3385,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) } if (ioc->chip == NULL) { - ioc_err(ioc, "unable to map adapter memory! or resource not found\n"); + ioc_err(ioc, + "unable to map adapter memory! or resource not found\n"); r = -EINVAL; goto out_fail; } @@ -3251,8 +3394,11 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) _base_mask_interrupts(ioc); r = _base_get_ioc_facts(ioc); - if (r) - goto out_fail; + if (r) { + rc = _base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_get_ioc_facts(ioc))) + goto out_fail; + } if (!ioc->rdpq_array_enable_assigned) { ioc->rdpq_array_enable = ioc->rdpq_array_capable; @@ -3279,8 +3425,8 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc) ioc->combined_reply_index_count, sizeof(resource_size_t *), GFP_KERNEL); if (!ioc->replyPostRegisterIndex) { - dfailprintk(ioc, - ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n")); + ioc_err(ioc, + "allocation for replyPostRegisterIndex failed!\n"); r = -ENOMEM; goto out_fail; } @@ -3428,6 +3574,22 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc, } /** + * _base_sdev_nr_inflight_request -get number of inflight requests + * of a request queue. + * @q: request_queue object + * + * returns number of inflight request of a request queue. + */ +inline unsigned long +_base_sdev_nr_inflight_request(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0]; + + return atomic_read(&hctx->nr_active); +} + + +/** * _base_get_high_iops_msix_index - get the msix index of * high iops queues * @ioc: per adapter object @@ -3446,7 +3608,7 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc, * reply queues in terms of batch count 16 when outstanding * IOs on the target device is >=8. 
*/ - if (atomic_read(&scmd->device->device_busy) > + if (_base_sdev_nr_inflight_request(scmd->device->request_queue) > MPT3SAS_DEVICE_HIGH_IOPS_DEPTH) return base_mod64(( atomic64_add_return(1, &ioc->high_iops_outstanding) / @@ -4203,10 +4365,12 @@ _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc) static int _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) { - Mpi2FWImageHeader_t *FWImgHdr; + Mpi2FWImageHeader_t *fw_img_hdr; + Mpi26ComponentImageHeader_t *cmp_img_hdr; Mpi25FWUploadRequest_t *mpi_request; Mpi2FWUploadReply_t mpi_reply; int r = 0; + u32 package_version = 0; void *fwpkg_data = NULL; dma_addr_t fwpkg_data_dma; u16 smid, ioc_status; @@ -4223,7 +4387,8 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length, &fwpkg_data_dma, GFP_KERNEL); if (!fwpkg_data) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", + ioc_err(ioc, + "Memory allocation for fwpkg data failed at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); return -ENOMEM; } @@ -4263,14 +4428,26 @@ _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc) ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { - FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data; - if (FWImgHdr->PackageVersion.Word) { - ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n", - FWImgHdr->PackageVersion.Struct.Major, - FWImgHdr->PackageVersion.Struct.Minor, - FWImgHdr->PackageVersion.Struct.Unit, - FWImgHdr->PackageVersion.Struct.Dev); - } + fw_img_hdr = (Mpi2FWImageHeader_t *)fwpkg_data; + if (le32_to_cpu(fw_img_hdr->Signature) == + MPI26_IMAGE_HEADER_SIGNATURE0_MPI26) { + cmp_img_hdr = + (Mpi26ComponentImageHeader_t *) + (fwpkg_data); + package_version = + le32_to_cpu( + cmp_img_hdr->ApplicationSpecific); + } else + package_version = + le32_to_cpu( + fw_img_hdr->PackageVersion.Word); + if (package_version) + ioc_info(ioc, + "FW Package Ver(%02d.%02d.%02d.%02d)\n", + ((package_version) & 0xFF000000) >> 24, + ((package_version) & 0x00FF0000) >> 16, + ((package_version) & 0x0000FF00) >> 8, + (package_version) & 0x000000FF); } else { _debug_dump_mf(&mpi_reply, sizeof(Mpi2FWUploadReply_t)/4); @@ -4941,12 +5118,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64; } - dinitprintk(ioc, - ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n", - ioc->max_sges_in_main_message, - ioc->max_sges_in_chain_message, - ioc->shost->sg_tablesize, - ioc->chains_needed_per_io)); + ioc_info(ioc, + "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), " + "sge_per_io(%d), chains_per_io(%d)\n", + ioc->max_sges_in_main_message, + ioc->max_sges_in_chain_message, + ioc->shost->sg_tablesize, + ioc->chains_needed_per_io); /* reply post queue, 16 byte align */ reply_post_free_sz = ioc->reply_post_queue_depth * @@ -5037,6 +5215,7 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) _base_release_memory_pools(ioc); goto retry_allocation; } + memset(ioc->request, 0, sz); if (retry_sz) ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n", @@ -5055,15 +5234,13 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth * ioc->request_sz); - dinitprintk(ioc, - ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", - ioc->request, ioc->hba_queue_depth, - 
ioc->request_sz, - (ioc->hba_queue_depth * ioc->request_sz) / 1024)); + ioc_info(ioc, + "request pool(0x%p) - dma(0x%llx): " + "depth(%d), frame_size(%d), pool_size(%d kB)\n", + ioc->request, (unsigned long long) ioc->request_dma, + ioc->hba_queue_depth, ioc->request_sz, + (ioc->hba_queue_depth * ioc->request_sz) / 1024); - dinitprintk(ioc, - ioc_info(ioc, "request pool: dma(0x%llx)\n", - (unsigned long long)ioc->request_dma)); total_sz += sz; dinitprintk(ioc, @@ -5194,7 +5371,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) &ct->chain_buffer_dma); if (!ct->chain_buffer) { ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n"); - _base_release_memory_pools(ioc); goto out; } } @@ -5249,13 +5425,12 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) goto out; } } - dinitprintk(ioc, - ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n", - ioc->sense, ioc->scsiio_depth, - SCSI_SENSE_BUFFERSIZE, sz / 1024)); - dinitprintk(ioc, - ioc_info(ioc, "sense_dma(0x%llx)\n", - (unsigned long long)ioc->sense_dma)); + ioc_info(ioc, + "sense pool(0x%p)- dma(0x%llx): depth(%d)," + "element_size(%d), pool_size(%d kB)\n", + ioc->sense, (unsigned long long)ioc->sense_dma, ioc->scsiio_depth, + SCSI_SENSE_BUFFERSIZE, sz / 1024); + total_sz += sz; /* reply pool, 4 byte align */ @@ -5333,12 +5508,10 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc) ioc_err(ioc, "config page: dma_pool_alloc failed\n"); goto out; } - dinitprintk(ioc, - ioc_info(ioc, "config page(0x%p): size(%d)\n", - ioc->config_page, ioc->config_page_sz)); - dinitprintk(ioc, - ioc_info(ioc, "config_page_dma(0x%llx)\n", - (unsigned long long)ioc->config_page_dma)); + + ioc_info(ioc, "config page(0x%p) - dma(0x%llx): size(%d)\n", + ioc->config_page, (unsigned long long)ioc->config_page_dma, + ioc->config_page_sz); total_sz += ioc->config_page_sz; ioc_info(ioc, "Allocated physical memory: size(%d kB)\n", @@ -5393,6 +5566,8 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) return 0; if (count && current_state == MPI2_IOC_STATE_FAULT) break; + if (count && current_state == MPI2_IOC_STATE_COREDUMP) + break; usleep_range(1000, 1500); count++; @@ -5410,8 +5585,6 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout) * * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell. 
*/ -static int -_base_diag_reset(struct MPT3SAS_ADAPTER *ioc); static int _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout) @@ -5496,7 +5669,12 @@ _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout) doorbell = ioc->base_readl(&ioc->chip->Doorbell); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc , doorbell); + mpt3sas_print_fault_code(ioc, doorbell); + return -EFAULT; + } + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, doorbell); return -EFAULT; } } else if (int_status == 0xFFFFFFFF) @@ -5558,6 +5736,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) { u32 ioc_state; int r = 0; + unsigned long flags; if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) { ioc_err(ioc, "%s: unknown reset_type\n", __func__); @@ -5576,6 +5755,7 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) r = -EFAULT; goto out; } + ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); if (ioc_state) { ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n", @@ -5584,6 +5764,26 @@ _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout) goto out; } out: + if (r != 0) { + ioc_state = mpt3sas_base_get_iocstate(ioc, 0); + spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); + /* + * Wait for IOC state CoreDump to clear only during + * HBA initialization & release time. + */ + if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP && (ioc->is_driver_loading == 1 || + ioc->fault_reset_work_q == NULL)) { + spin_unlock_irqrestore( + &ioc->ioc_reset_in_progress_lock, flags); + mpt3sas_print_coredump_info(ioc, ioc_state); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + spin_lock_irqsave( + &ioc->ioc_reset_in_progress_lock, flags); + } + spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags); + } ioc_info(ioc, "message unit reset: %s\n", r == 0 ? 
"SUCCESS" : "FAILED"); return r; @@ -5731,7 +5931,7 @@ _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, mfp = (__le32 *)reply; pr_info("\toffset:data\n"); for (i = 0; i < reply_bytes/4; i++) - pr_info("\t[0x%02x]:%08x\n", i*4, + ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, le32_to_cpu(mfp[i])); } return 0; @@ -5799,10 +5999,9 @@ mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, ioc->ioc_link_reset_in_progress) ioc->ioc_link_reset_in_progress = 0; if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->base_cmds.status, mpi_request, - sizeof(Mpi2SasIoUnitControlRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, ioc->base_cmds.status, + mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t)/4, + issue_reset); goto issue_host_reset; } if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) @@ -5868,16 +6067,16 @@ mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc, ioc->base_cmds.status = MPT3_CMD_PENDING; request = mpt3sas_base_get_msg_frame(ioc, smid); ioc->base_cmds.smid = smid; + memset(request, 0, ioc->request_sz); memcpy(request, mpi_request, sizeof(Mpi2SepReply_t)); init_completion(&ioc->base_cmds.done); ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->base_cmds.done, msecs_to_jiffies(10000)); if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->base_cmds.status, mpi_request, - sizeof(Mpi2SepRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->base_cmds.status, mpi_request, + sizeof(Mpi2SepRequest_t)/4, issue_reset); goto issue_host_reset; } if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) @@ -5976,9 +6175,15 @@ _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout) } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); goto issue_diag_reset; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + ioc_info(ioc, + "%s: Skipping the diag reset here. (ioc_state=0x%x)\n", + __func__, ioc_state); + return -EFAULT; } ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout); @@ -6157,6 +6362,12 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma); } + /* + * Set the flag to enable CoreDump state feature in IOC firmware. + */ + mpi_request.ConfigurationFlags |= + cpu_to_le16(MPI26_IOCINIT_CFGFLAGS_COREDUMP_ENABLE); + /* This time stamp specifies number of milliseconds * since epoch ~ midnight January 1, 1970. 
*/ @@ -6168,9 +6379,9 @@ _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc) int i; mfp = (__le32 *)&mpi_request; - pr_info("\toffset:data\n"); + ioc_info(ioc, "\toffset:data\n"); for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++) - pr_info("\t[0x%02x]:%08x\n", i*4, + ioc_info(ioc, "\t[0x%02x]:%08x\n", i*4, le32_to_cpu(mfp[i])); } @@ -6540,8 +6751,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) /* wait 100 msec */ msleep(100); - if (count++ > 20) + if (count++ > 20) { + ioc_info(ioc, + "Stop writing magic sequence after 20 retries\n"); goto out; + } host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); drsprintk(ioc, @@ -6565,8 +6779,11 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc) host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic); - if (host_diagnostic == 0xFFFFFFFF) + if (host_diagnostic == 0xFFFFFFFF) { + ioc_info(ioc, + "Invalid host diagnostic register value\n"); goto out; + } if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER)) break; @@ -6653,16 +6870,33 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) return 0; if (ioc_state & MPI2_DOORBELL_USED) { - dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n")); + ioc_info(ioc, "unexpected doorbell active!\n"); goto issue_diag_reset; } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); goto issue_diag_reset; } + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_COREDUMP) { + /* + * if host reset is invoked while watch dog thread is waiting + * for IOC state to be changed to Fault state then driver has + * to wait here for CoreDump state to clear otherwise reset + * will be issued to the FW and FW move the IOC state to + * reset state without copying the FW logs to coredump region. + */ + if (ioc->ioc_coredump_loop != MPT3SAS_COREDUMP_LOOP_DONE) { + mpt3sas_print_coredump_info(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + mpt3sas_base_wait_for_coredump_completion(ioc, + __func__); + } + goto issue_diag_reset; + } + if (type == FORCE_BIG_HAMMER) goto issue_diag_reset; @@ -6686,7 +6920,7 @@ _base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type) static int _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) { - int r, i, index; + int r, i, index, rc; unsigned long flags; u32 reply_address; u16 smid; @@ -6789,8 +7023,19 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc) skip_init_reply_post_free_queue: r = _base_send_ioc_init(ioc); - if (r) - return r; + if (r) { + /* + * No need to check IOC state for fault state & issue + * diag reset during host reset. This check is need + * only during driver load time. 
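The diag-reset hunk above caps the magic-sequence writes at 20 attempts, one per 100 ms, and logs before bailing out instead of looping silently. A sketch of that bounded-retry shape, assuming try_write_sequence() as a stand-in for the real register writes and HostDiagnostic read-back:

#include <stdio.h>
#include <unistd.h>

/* Stand-in for writing the MPI2 "magic" unlock sequence and checking
 * that it took effect; pretends it sticks on the fourth attempt. */
static int try_write_sequence(int attempt)
{
        return attempt >= 3;
}

int main(void)
{
        int count = 0;

        while (!try_write_sequence(count)) {
                /* wait 100 msec, as the driver does between attempts */
                usleep(100 * 1000);
                if (count++ > 20) {
                        printf("Stop writing magic sequence after 20 retries\n");
                        return 1;
                }
        }
        printf("sequence accepted after %d attempts\n", count + 1);
        return 0;
}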
+ */ + if (!ioc->is_driver_loading) + return r; + + rc = _base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_send_ioc_init(ioc))) + return r; + } /* initialize reply free host index */ ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; @@ -6882,7 +7127,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc) int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) { - int r, i; + int r, i, rc; int cpu_id, last_cpu_id = 0; dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -6895,8 +7140,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); ioc->reply_queue_count = 1; if (!ioc->cpu_msix_table) { - dfailprintk(ioc, - ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n")); + ioc_info(ioc, "Allocation for cpu_msix_table failed!!!\n"); r = -ENOMEM; goto out_free_resources; } @@ -6905,8 +7149,7 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, sizeof(resource_size_t *), GFP_KERNEL); if (!ioc->reply_post_host_index) { - dfailprintk(ioc, - ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n")); + ioc_info(ioc, "Allocation for reply_post_host_index failed!!!\n"); r = -ENOMEM; goto out_free_resources; } @@ -6926,8 +7169,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) pci_set_drvdata(ioc->pdev, ioc->shost); r = _base_get_ioc_facts(ioc); - if (r) - goto out_free_resources; + if (r) { + rc = _base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_get_ioc_facts(ioc))) + goto out_free_resources; + } switch (ioc->hba_mpi_version_belonged) { case MPI2_VERSION: @@ -6995,8 +7241,11 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) { r = _base_get_port_facts(ioc, i); - if (r) - goto out_free_resources; + if (r) { + rc = _base_check_for_fault_and_issue_reset(ioc); + if (rc || (_base_get_port_facts(ioc, i))) + goto out_free_resources; + } } r = _base_allocate_memory_pools(ioc); @@ -7118,7 +7367,15 @@ mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc) if (r) goto out_free_resources; + /* + * Copy current copy of IOCFacts in prev_fw_facts + * and it will be used during online firmware upgrade. 
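These attach-path hunks all follow one idiom: when a setup step fails during driver load, check whether the IOC is sitting in Fault, attempt one recovery (diag reset), and retry the step exactly once — the "if (rc || (step()))" form fails out if either the recovery or the second attempt fails. A compact model of that control flow; both function pointers are hypothetical stand-ins for calls like _base_check_for_fault_and_issue_reset() and _base_get_ioc_facts():

#include <stdio.h>

/* One recovery attempt followed by a single retry of the failed step. */
static int retry_once_after_recovery(int (*step)(void), int (*recover)(void))
{
        int r = step();

        if (!r)
                return 0;
        if (recover() || step())        /* mirrors: if (rc || (step(ioc))) */
                return r;               /* report the original failure */
        return 0;
}

static int flaky_step(void)
{
        static int calls;
        return calls++ == 0 ? -5 : 0;   /* fail first, succeed on retry */
}

static int recover(void)
{
        return 0;                       /* recovery "succeeds" */
}

int main(void)
{
        printf("result: %d\n", retry_once_after_recovery(flaky_step, recover));
        return 0;
}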
+ */ + memcpy(&ioc->prev_fw_facts, &ioc->facts, + sizeof(struct mpt3sas_facts)); + ioc->non_operational_loop = 0; + ioc->ioc_coredump_loop = 0; ioc->got_task_abort_from_ioctl = 0; return 0; @@ -7200,14 +7457,14 @@ static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) } /** - * _base_after_reset_handler - after reset handler + * _base_clear_outstanding_mpt_commands - clears outstanding mpt commands * @ioc: per adapter object */ -static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) +static void +_base_clear_outstanding_mpt_commands(struct MPT3SAS_ADAPTER *ioc) { - mpt3sas_scsih_after_reset_handler(ioc); - mpt3sas_ctl_after_reset_handler(ioc); - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding mpt cmds\n", __func__)); if (ioc->transport_cmds.status & MPT3_CMD_PENDING) { ioc->transport_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid); @@ -7241,6 +7498,17 @@ static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) } /** + * _base_clear_outstanding_commands - clear all outstanding commands + * @ioc: per adapter object + */ +static void _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc) +{ + mpt3sas_scsih_clear_outstanding_scsi_tm_commands(ioc); + mpt3sas_ctl_clear_outstanding_ioctls(ioc); + _base_clear_outstanding_mpt_commands(ioc); +} + +/** * _base_reset_done_handler - reset done handler * @ioc: per adapter object */ @@ -7280,6 +7548,85 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc) } /** + * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts + * attributes during online firmware upgrade and update the corresponding + * IOC variables accordingly. + * + * @ioc: Pointer to MPT_ADAPTER structure + */ +static int +_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc) +{ + u16 pd_handles_sz; + void *pd_handles = NULL, *blocking_handles = NULL; + void *pend_os_device_add = NULL, *device_remove_in_progress = NULL; + struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts; + + if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) { + pd_handles_sz = (ioc->facts.MaxDevHandle / 8); + if (ioc->facts.MaxDevHandle % 8) + pd_handles_sz++; + + pd_handles = krealloc(ioc->pd_handles, pd_handles_sz, + GFP_KERNEL); + if (!pd_handles) { + ioc_info(ioc, + "Unable to allocate the memory for pd_handles of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(pd_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->pd_handles = pd_handles; + + blocking_handles = krealloc(ioc->blocking_handles, + pd_handles_sz, GFP_KERNEL); + if (!blocking_handles) { + ioc_info(ioc, + "Unable to allocate the memory for " + "blocking_handles of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(blocking_handles + ioc->pd_handles_sz, 0, + (pd_handles_sz - ioc->pd_handles_sz)); + ioc->blocking_handles = blocking_handles; + ioc->pd_handles_sz = pd_handles_sz; + + pend_os_device_add = krealloc(ioc->pend_os_device_add, + pd_handles_sz, GFP_KERNEL); + if (!pend_os_device_add) { + ioc_info(ioc, + "Unable to allocate the memory for pend_os_device_add of sz: %d\n", + pd_handles_sz); + return -ENOMEM; + } + memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0, + (pd_handles_sz - ioc->pend_os_device_add_sz)); + ioc->pend_os_device_add = pend_os_device_add; + ioc->pend_os_device_add_sz = pd_handles_sz; + + device_remove_in_progress = krealloc( + ioc->device_remove_in_progress, pd_handles_sz, 
GFP_KERNEL); + if (!device_remove_in_progress) { + ioc_info(ioc, + "Unable to allocate the memory for " + "device_remove_in_progress of sz: %d\n " + , pd_handles_sz); + return -ENOMEM; + } + memset(device_remove_in_progress + + ioc->device_remove_in_progress_sz, 0, + (pd_handles_sz - ioc->device_remove_in_progress_sz)); + ioc->device_remove_in_progress = device_remove_in_progress; + ioc->device_remove_in_progress_sz = pd_handles_sz; + } + + memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts)); + return 0; +} + +/** * mpt3sas_base_hard_reset_handler - reset controller * @ioc: Pointer to MPT_ADAPTER structure * @type: FORCE_BIG_HAMMER or SOFT_RESET @@ -7319,7 +7666,9 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, MPT3_DIAG_BUFFER_IS_RELEASED))) { is_trigger = 1; ioc_state = mpt3sas_base_get_iocstate(ioc, 0); - if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) + if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT || + (ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) is_fault = 1; } _base_pre_reset_handler(ioc); @@ -7328,7 +7677,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, r = _base_make_ioc_ready(ioc, type); if (r) goto out; - _base_after_reset_handler(ioc); + _base_clear_outstanding_commands(ioc); /* If this hard reset is called while port enable is active, then * there is no reason to call make_ioc_operational @@ -7342,6 +7691,13 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, if (r) goto out; + r = _base_check_ioc_facts_changes(ioc); + if (r) { + ioc_info(ioc, + "Some of the parameters got changed in this new firmware" + " image and it requires system reboot\n"); + goto out; + } if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable) panic("%s: Issue occurred with flashing controller firmware." "Please reboot the system and ensure that the correct" @@ -7352,9 +7708,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, _base_reset_done_handler(ioc); out: - dtmprintk(ioc, - ioc_info(ioc, "%s: %s\n", - __func__, r == 0 ? "SUCCESS" : "FAILED")); + ioc_info(ioc, "%s: %s\n", __func__, r == 0 ? "SUCCESS" : "FAILED"); spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags); ioc->shost_recovery = 0; diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h index 6afbdb044310..e7197150721f 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.h +++ b/drivers/scsi/mpt3sas/mpt3sas_base.h @@ -76,8 +76,8 @@ #define MPT3SAS_DRIVER_NAME "mpt3sas" #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>" #define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver" -#define MPT3SAS_DRIVER_VERSION "29.100.00.00" -#define MPT3SAS_MAJOR_VERSION 29 +#define MPT3SAS_DRIVER_VERSION "33.100.00.00" +#define MPT3SAS_MAJOR_VERSION 33 #define MPT3SAS_MINOR_VERSION 100 #define MPT3SAS_BUILD_VERSION 0 #define MPT3SAS_RELEASE_VERSION 00 @@ -90,6 +90,10 @@ #define MPT2SAS_BUILD_VERSION 0 #define MPT2SAS_RELEASE_VERSION 00 +/* CoreDump: Default timeout */ +#define MPT3SAS_DEFAULT_COREDUMP_TIMEOUT_SECONDS (15) /*15 seconds*/ +#define MPT3SAS_COREDUMP_LOOP_DONE (0xFF) + /* * Set MPT3SAS_SG_DEPTH value based on user input. 
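_base_check_ioc_facts_changes() above grows four per-device-handle bitmaps with krealloc() when a firmware upgrade raises MaxDevHandle. krealloc() preserves the old contents but, like realloc(), leaves the newly added bytes uninitialized, hence the explicit memset() of just the extension. A userspace sketch of the same grow-and-zero-tail move, using realloc() as the analogue:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Grow a handle bitmap from old_sz to new_sz bytes, zeroing only the
 * freshly added tail; the old bits must survive, as in the driver. */
static unsigned char *grow_bitmap(unsigned char *map, size_t old_sz,
                                  size_t new_sz)
{
        unsigned char *p = realloc(map, new_sz);        /* krealloc() analogue */

        if (!p)
                return NULL;            /* caller still owns 'map' on failure */
        memset(p + old_sz, 0, new_sz - old_sz);
        return p;
}

int main(void)
{
        size_t max_handle = 1100;
        size_t old_sz = 128, new_sz = (max_handle + 7) / 8; /* handles -> bytes */
        unsigned char *map = calloc(old_sz, 1), *bigger;

        if (!map)
                return 1;
        map[0] = 0x01;                          /* pretend handle 0 is set */
        bigger = grow_bitmap(map, old_sz, new_sz);
        if (!bigger) {
                free(map);
                return 1;
        }
        map = bigger;
        printf("sz=%zu, bit0=%d, new tail=%d\n",
               new_sz, map[0] & 1, map[new_sz - 1]);    /* sz=138, 1, 0 */
        free(map);
        return 0;
}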
*/ @@ -140,6 +144,7 @@ #define MAX_CHAIN_ELEMT_SZ 16 #define DEFAULT_NUM_FWCHAIN_ELEMTS 8 +#define IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT 6 #define FW_IMG_HDR_READ_TIMEOUT 15 #define IOC_OPERATIONAL_WAIT_COUNT 10 @@ -303,6 +308,8 @@ struct mpt3sas_nvme_cmd { #define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01) #define MPT3_DIAG_BUFFER_IS_RELEASED (0x02) #define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04) +#define MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED (0x08) +#define MPT3_DIAG_BUFFER_IS_APP_OWNED (0x10) /* * HP HBA branding @@ -391,9 +398,15 @@ struct Mpi2ManufacturingPage11_t { u8 Reserved6; /* 2Fh */ __le32 Reserved7[7]; /* 30h - 4Bh */ u8 NVMeAbortTO; /* 4Ch */ - u8 Reserved8; /* 4Dh */ - u16 Reserved9; /* 4Eh */ - __le32 Reserved10[4]; /* 50h - 60h */ + u8 NumPerDevEvents; /* 4Dh */ + u8 HostTraceBufferDecrementSizeKB; /* 4Eh */ + u8 HostTraceBufferFlags; /* 4Fh */ + u16 HostTraceBufferMaxSizeKB; /* 50h */ + u16 HostTraceBufferMinSizeKB; /* 52h */ + u8 CoreDumpTOSec; /* 54h */ + u8 Reserved8; /* 55h */ + u16 Reserved9; /* 56h */ + __le32 Reserved10; /* 58h */ }; /** @@ -583,6 +596,8 @@ static inline void sas_device_put(struct _sas_device *s) * @enclosure_level: The level of device's enclosure from the controller * @connector_name: ASCII value of the Connector's name * @serial_number: pointer of serial number string allocated runtime + * @access_status: Device's Access Status + * @shutdown_latency: NVMe device's RTD3 Entry Latency * @refcount: reference count for deletion */ struct _pcie_device { @@ -604,6 +619,8 @@ struct _pcie_device { u8 connector_name[4]; u8 *serial_number; u8 reset_timeout; + u8 access_status; + u16 shutdown_latency; struct kref refcount; }; /** @@ -1038,6 +1055,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @cpu_msix_table: table for mapping cpus to msix index * @cpu_msix_table_sz: table size * @total_io_cnt: Gives total IO count, used to load balance the interrupts + * @ioc_coredump_loop: will have non-zero value when FW is in CoreDump state * @high_iops_outstanding: used to load balance the interrupts * within high iops reply queues * @msix_load_balance: Enables load balancing of interrupts across @@ -1045,6 +1063,7 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands * @thresh_hold: Max number of reply descriptors processed * before updating Host Index + * @drv_support_bitmap: driver's supported feature bit map * @scsi_io_cb_idx: shost generated commands * @tm_cb_idx: task management commands * @scsih_cb_idx: scsih internal commands @@ -1065,7 +1084,12 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc); * @event_context: unique id for each logged event * @event_log: event log pointer * @event_masks: events that are masked + * @max_shutdown_latency: timeout value for NVMe shutdown operation, + * which is equal that NVMe drive's RTD3 Entry Latency + * which has reported maximum RTD3 Entry Latency value + * among attached NVMe drives. 
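The new HostTraceBuffer{Max,Min,DecrementSize}KB fields in Manufacturing Page 11 are kilobyte counts; the ctl code later converts them with <<10 and walks the requested size down from max toward min when allocation fails. A self-contained sketch of that downward size negotiation, with try_alloc() standing in for the register/DMA-allocation step:

#include <stdio.h>

/* Pretend allocations above 1 MiB fail, to exercise the walk-down. */
static int try_alloc(unsigned int sz)
{
        return sz <= (1024u * 1024) ? 0 : -12;  /* -ENOMEM analogue */
}

int main(void)
{
        /* KB-valued fields as firmware would report them. */
        unsigned int max_kb = 4096, min_kb = 512, decr_kb = 512;
        unsigned int sz = max_kb << 10;         /* KB -> bytes */
        unsigned int min_sz = min_kb << 10;
        unsigned int decr = decr_kb << 10;
        int ret;

        if (min_sz > sz)                        /* bad page data: fall back */
                min_sz = 0;
        if (decr == 0)                          /* no step: retry min directly */
                decr = sz - min_sz;

        while ((ret = try_alloc(sz)) == -12 && sz - decr >= min_sz)
                sz -= decr;                     /* shrink and retry */

        printf(ret ? "no buffer, last try %u KB\n" : "allocated %u KB\n",
               sz >> 10);
        return 0;
}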
* @facts: static facts data + * @prev_fw_facts: previous fw facts data * @pfacts: static port facts data * @manu_pg0: static manufacturing page 0 * @manu_pg10: static manufacturing page 10 @@ -1222,11 +1246,14 @@ struct MPT3SAS_ADAPTER { u32 ioc_reset_count; MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds; u32 non_operational_loop; + u8 ioc_coredump_loop; atomic64_t total_io_cnt; atomic64_t high_iops_outstanding; bool msix_load_balance; u16 thresh_hold; u8 high_iops_queues; + u32 drv_support_bitmap; + bool enable_sdev_max_qd; /* internal commands, callback index */ u8 scsi_io_cb_idx; @@ -1272,10 +1299,11 @@ struct MPT3SAS_ADAPTER { u8 tm_custom_handling; u8 nvme_abort_timeout; - + u16 max_shutdown_latency; /* static config pages */ struct mpt3sas_facts facts; + struct mpt3sas_facts prev_fw_facts; struct mpt3sas_port_facts *pfacts; Mpi2ManufacturingPage0_t manu_pg0; struct Mpi2ManufacturingPage10_t manu_pg10; @@ -1450,6 +1478,8 @@ struct MPT3SAS_ADAPTER { GET_MSIX_INDEX get_msix_index_for_smlio; }; +#define MPT_DRV_SUPPORT_BITMAP_MEMMOVE 0x00000001 + typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); @@ -1517,6 +1547,17 @@ void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked); void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code); +#define mpt3sas_print_fault_code(ioc, fault_code) \ +do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + mpt3sas_base_fault_info(ioc, fault_code); } while (0) + +void mpt3sas_base_coredump_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code); +#define mpt3sas_print_coredump_info(ioc, fault_code) \ +do { pr_err("%s fault info from func: %s\n", ioc->name, __func__); \ + mpt3sas_base_coredump_info(ioc, fault_code); } while (0) + +int mpt3sas_base_wait_for_coredump_completion(struct MPT3SAS_ADAPTER *ioc, + const char *caller); int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc, Mpi2SasIoUnitControlReply_t *mpi_reply, Mpi2SasIoUnitControlRequest_t *mpi_request); @@ -1538,6 +1579,11 @@ mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc); u8 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc, u8 status, void *mpi_request, int sz); +#define mpt3sas_check_cmd_timeout(ioc, status, mpi_request, sz, issue_reset) \ +do { ioc_err(ioc, "In func: %s\n", __func__); \ + issue_reset = mpt3sas_base_check_cmd_timeout(ioc, \ + status, mpi_request, sz); } while (0) + int mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int wait_count); /* scsih shared API */ @@ -1546,7 +1592,8 @@ struct scsi_cmnd *mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply); void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_scsih_clear_outstanding_scsi_tm_commands( + struct MPT3SAS_ADAPTER *ioc); void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, @@ -1579,6 +1626,7 @@ struct _pcie_device *mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc); struct _raid_device * mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle); +void mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth); /* config shared API */ u8 
mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, @@ -1679,7 +1727,7 @@ void mpt3sas_ctl_exit(ushort hbas_to_enumerate); u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply); void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc); -void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc); +void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc); void mpt3sas_ctl_reset_done_handler(struct MPT3SAS_ADAPTER *ioc); u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply); @@ -1733,4 +1781,20 @@ mpt3sas_setup_direct_io(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd, /* NCQ Prio Handling Check */ bool scsih_ncq_prio_supp(struct scsi_device *sdev); +/** + * _scsih_is_pcie_scsi_device - determines if device is an pcie scsi device + * @device_info: bitfield providing information about the device. + * Context: none + * + * Returns 1 if scsi device. + */ +static inline int +mpt3sas_scsih_is_pcie_scsi_device(u32 device_info) +{ + if ((device_info & + MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) == MPI26_PCIE_DEVINFO_SCSI) + return 1; + else + return 0; +} #endif /* MPT3SAS_BASE_H_INCLUDED */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c index 14a1a2793dd5..62ddf53ab3ae 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_config.c +++ b/drivers/scsi/mpt3sas/mpt3sas_config.c @@ -101,9 +101,6 @@ _config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, Mpi2ConfigRequest_t *mpi_request; char *desc = NULL; - if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) - return; - mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) { case MPI2_CONFIG_PAGETYPE_IO_UNIT: @@ -269,7 +266,8 @@ mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, mpi_reply->MsgLength*4); } ioc->config_cmds.status &= ~MPT3_CMD_PENDING; - _config_display_some_debug(ioc, smid, "config_done", mpi_reply); + if (ioc->logging_level & MPT_DEBUG_CONFIG) + _config_display_some_debug(ioc, smid, "config_done", mpi_reply); ioc->config_cmds.smid = USHRT_MAX; complete(&ioc->config_cmds.done); return 1; @@ -305,6 +303,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t u8 retry_count, issue_host_reset = 0; struct config_request mem; u32 ioc_status = UINT_MAX; + u8 issue_reset = 0; mutex_lock(&ioc->config_cmds.mutex); if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) { @@ -378,14 +377,18 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t config_request = mpt3sas_base_get_msg_frame(ioc, smid); ioc->config_cmds.smid = smid; memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t)); - _config_display_some_debug(ioc, smid, "config_request", NULL); + if (ioc->logging_level & MPT_DEBUG_CONFIG) + _config_display_some_debug(ioc, smid, "config_request", NULL); init_completion(&ioc->config_cmds.done); ioc->put_smid_default(ioc, smid); wait_for_completion_timeout(&ioc->config_cmds.done, timeout*HZ); if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) { - mpt3sas_base_check_cmd_timeout(ioc, - ioc->config_cmds.status, mpi_request, - sizeof(Mpi2ConfigRequest_t)/4); + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); + mpt3sas_check_cmd_timeout(ioc, + ioc->config_cmds.status, mpi_request, + sizeof(Mpi2ConfigRequest_t)/4, issue_reset); retry_count++; if (ioc->config_cmds.smid == smid) mpt3sas_base_free_smid(ioc, smid); @@ -404,8 +407,11 
@@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t /* Reply Frame Sanity Checks to workaround FW issues */ if ((mpi_request->Header.PageType & 0xF) != (mpi_reply->Header.PageType & 0xF)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n", ioc->name, __func__, mpi_request->Header.PageType & 0xF, @@ -415,8 +421,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (((mpi_request->Header.PageType & 0xF) == MPI2_CONFIG_PAGETYPE_EXTENDED) && mpi_request->ExtPageType != mpi_reply->ExtPageType) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); panic("%s: %s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", ioc->name, __func__, mpi_request->ExtPageType, @@ -439,8 +448,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (p) { if ((mpi_request->Header.PageType & 0xF) != (p[3] & 0xF)) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); _debug_dump_config(p, min_t(u16, mem.sz, config_page_sz)/4); panic("%s: %s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n", @@ -452,8 +464,11 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t if (((mpi_request->Header.PageType & 0xF) == MPI2_CONFIG_PAGETYPE_EXTENDED) && (mpi_request->ExtPageType != p[6])) { + if (!(ioc->logging_level & MPT_DEBUG_CONFIG)) + _config_display_some_debug(ioc, + smid, "config_request", NULL); _debug_dump_mf(mpi_request, ioc->request_sz/4); - _debug_dump_reply(mpi_reply, ioc->request_sz/4); + _debug_dump_reply(mpi_reply, ioc->reply_sz/4); _debug_dump_config(p, min_t(u16, mem.sz, config_page_sz)/4); panic("%s: %s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n", diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c index d4ecfbbe738c..62e552838565 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c @@ -180,6 +180,12 @@ _ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid, case MPI2_FUNCTION_SMP_PASSTHROUGH: desc = "smp_passthrough"; break; + case MPI2_FUNCTION_TOOLBOX: + desc = "toolbox"; + break; + case MPI2_FUNCTION_NVME_ENCAPSULATED: + desc = "nvme_encapsulated"; + break; } if (!desc) @@ -466,19 +472,27 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) if ((ioc->diag_buffer_status[i] & MPT3_DIAG_BUFFER_IS_RELEASED)) continue; + + /* + * add a log message to indicate the release + */ + ioc_info(ioc, + "%s: Releasing the trace buffer due to adapter reset.", + __func__); mpt3sas_send_diag_release(ioc, i, &issue_reset); } } /** - * mpt3sas_ctl_reset_handler - reset callback handler (for ctl) + * mpt3sas_ctl_reset_handler - clears outstanding ioctl cmd. 
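The mpt3sas_config.c hunks move the MPT_DEBUG_CONFIG check out of _config_display_some_debug() and into its callers; on the error paths the dump is then made precisely when the flag is off, because with the flag on the request was already dumped at submit time and dumping it twice would be noise. A small model of that inverted-gate pattern:

#include <stdio.h>

#define DEBUG_CONFIG 0x1

static unsigned int logging_level;      /* stand-in for ioc->logging_level */

static void display_debug(const char *when)
{
        printf("debug dump (%s)\n", when);
}

static void send_request(int fail)
{
        if (logging_level & DEBUG_CONFIG)
                display_debug("submit");        /* normal verbose path */

        if (fail) {
                /* Dump on error only if it was NOT already dumped above. */
                if (!(logging_level & DEBUG_CONFIG))
                        display_debug("error");
                printf("request failed\n");
        }
}

int main(void)
{
        send_request(1);                /* quiet level: one dump, on error */
        logging_level = DEBUG_CONFIG;
        send_request(1);                /* verbose: dumped at submit only */
        return 0;
}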
* @ioc: per adapter object * * The handler for doing any required cleanup or initialization. */ -void mpt3sas_ctl_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) +void mpt3sas_ctl_clear_outstanding_ioctls(struct MPT3SAS_ADAPTER *ioc) { - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding ioctl cmd\n", __func__)); if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) { ioc->ctl_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid); @@ -596,8 +610,16 @@ _ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg, if (priv_data->sas_target->handle != handle) continue; st = scsi_cmd_priv(scmd); - tm_request->TaskMID = cpu_to_le16(st->smid); - found = 1; + + /* + * If the given TaskMID from the user space is zero, then the + * first outstanding smid will be picked up. Otherwise, + * targeted smid will be the one. + */ + if (!tm_request->TaskMID || tm_request->TaskMID == st->smid) { + tm_request->TaskMID = cpu_to_le16(st->smid); + found = 1; + } } if (!found) { @@ -654,7 +676,6 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, size_t data_in_sz = 0; long ret; u16 device_handle = MPT3SAS_INVALID_DEVICE_HANDLE; - u8 tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; issue_reset = 0; @@ -707,6 +728,7 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, ioc->ctl_cmds.status = MPT3_CMD_PENDING; memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); request = mpt3sas_base_get_msg_frame(ioc, smid); + memset(request, 0, ioc->request_sz); memcpy(request, mpi_request, karg.data_sge_offset*4); ioc->ctl_cmds.smid = smid; data_out_sz = karg.data_out_size; @@ -770,6 +792,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, case MPI2_FUNCTION_NVME_ENCAPSULATED: { nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request; + if (!ioc->pcie_sg_lookup) { + dtmprintk(ioc, ioc_info(ioc, + "HBA doesn't support NVMe. Rejecting NVMe Encapsulated request.\n" + )); + + if (ioc->logging_level & MPT_DEBUG_TM) + _debug_dump_mf(nvme_encap_request, + ioc->request_sz/4); + mpt3sas_base_free_smid(ioc, smid); + ret = -EINVAL; + goto out; + } /* * Get the Physical Address of the sense buffer. 
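_ctl_set_task_mid() above gains wildcard semantics: a TaskMID of zero from user space now matches the first outstanding smid, while a non-zero value must match exactly. A sketch of that match rule over a toy table of outstanding commands:

#include <stdio.h>

/* Return the smid an abort should target: 'wanted' if outstanding,
 * the first outstanding smid when wanted == 0, or 0 if none match. */
static unsigned short pick_task_mid(const unsigned short *outstanding,
                                    int n, unsigned short wanted)
{
        int i;

        for (i = 0; i < n; i++) {
                if (!wanted || wanted == outstanding[i])
                        return outstanding[i];
        }
        return 0;       /* not found: the ioctl reports no match */
}

int main(void)
{
        unsigned short smids[] = { 7, 12, 31 };

        printf("wildcard -> %u\n", pick_task_mid(smids, 3, 0));   /* 7 */
        printf("exact 12 -> %u\n", pick_task_mid(smids, 3, 12));  /* 12 */
        printf("missing  -> %u\n", pick_task_mid(smids, 3, 99));  /* 0 */
        return 0;
}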
* Use Error Response buffer address field to hold the sense @@ -921,13 +955,37 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, Mpi2ToolboxCleanRequest_t *toolbox_request = (Mpi2ToolboxCleanRequest_t *)mpi_request; - if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) { + if ((toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) + || (toolbox_request->Tool == + MPI26_TOOLBOX_BACKEND_PCIE_LANE_MARGIN)) ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma, data_in_sz); - } else { + else if (toolbox_request->Tool == + MPI2_TOOLBOX_MEMORY_MOVE_TOOL) { + Mpi2ToolboxMemMoveRequest_t *mem_move_request = + (Mpi2ToolboxMemMoveRequest_t *)request; + Mpi2SGESimple64_t tmp, *src = NULL, *dst = NULL; + + ioc->build_sg_mpi(ioc, psge, data_out_dma, + data_out_sz, data_in_dma, data_in_sz); + if (data_out_sz && !data_in_sz) { + dst = + (Mpi2SGESimple64_t *)&mem_move_request->SGL; + src = (void *)dst + ioc->sge_size; + + memcpy(&tmp, src, ioc->sge_size); + memcpy(src, dst, ioc->sge_size); + memcpy(dst, &tmp, ioc->sge_size); + } + if (ioc->logging_level & MPT_DEBUG_TM) { + ioc_info(ioc, + "Mpi2ToolboxMemMoveRequest_t request msg\n"); + _debug_dump_mf(mem_move_request, + ioc->request_sz/4); + } + } else ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz, - data_in_dma, data_in_sz); - } + data_in_dma, data_in_sz); ioc->put_smid_default(ioc, smid); break; } @@ -970,10 +1028,9 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, ioc->ignore_loginfos = 0; } if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - karg.data_sge_offset); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + karg.data_sge_offset, issue_reset); goto issue_host_reset; } @@ -1047,12 +1104,14 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg, mpt3sas_halt_firmware(ioc); pcie_device = mpt3sas_get_pdev_by_handle(ioc, le16_to_cpu(mpi_request->FunctionDependent1)); - if (pcie_device && (!ioc->tm_custom_handling)) + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device( + pcie_device->device_info)))) mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0, pcie_device->reset_timeout, - tr_method); + MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE); else mpt3sas_scsih_issue_locked_tm(ioc, le16_to_cpu(mpi_request->FunctionDependent1), @@ -1272,7 +1331,8 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg) __func__)); retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); - ioc_info(ioc, "host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED")); + ioc_info(ioc, + "Ioctl: host reset: %s\n", ((!retval) ? 
"SUCCESS" : "FAILED")); return 0; } @@ -1450,6 +1510,26 @@ _ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type) return rc; } +/** + * _ctl_diag_get_bufftype - return diag buffer type + * either TRACE, SNAPSHOT, or EXTENDED + * @ioc: per adapter object + * @unique_id: specifies the unique_id for the buffer + * + * returns MPT3_DIAG_UID_NOT_FOUND if the id not found + */ +static u8 +_ctl_diag_get_bufftype(struct MPT3SAS_ADAPTER *ioc, u32 unique_id) +{ + u8 index; + + for (index = 0; index < MPI2_DIAG_BUF_TYPE_COUNT; index++) { + if (ioc->unique_id[index] == unique_id) + return index; + } + + return MPT3_DIAG_UID_NOT_FOUND; +} /** * _ctl_diag_register_2 - wrapper for registering diag buffer support @@ -1497,11 +1577,88 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, return -EPERM; } + if (diag_register->unique_id == 0) { + ioc_err(ioc, + "%s: Invalid UID(0x%08x), buffer_type(0x%02x)\n", __func__, + diag_register->unique_id, buffer_type); + return -EINVAL; + } + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_APP_OWNED) && + !(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + ioc_err(ioc, + "%s: buffer_type(0x%02x) is already registered by application with UID(0x%08x)\n", + __func__, buffer_type, ioc->unique_id[buffer_type]); + return -EINVAL; + } + if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_REGISTERED) { - ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n", - __func__, buffer_type); - return -EINVAL; + /* + * If driver posts buffer initially, then an application wants + * to Register that buffer (own it) without Releasing first, + * the application Register command MUST have the same buffer + * type and size in the Register command (obtained from the + * Query command). Otherwise that Register command will be + * failed. If the application has released the buffer but wants + * to re-register it, it should be allowed as long as the + * Unique-Id/Size match. + */ + + if (ioc->unique_id[buffer_type] == MPT3DIAGBUFFUNIQUEID && + ioc->diag_buffer_sz[buffer_type] == + diag_register->requested_buffer_size) { + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + dctlprintk(ioc, ioc_info(ioc, + "%s: diag_buffer (%d) ownership changed. old-ID(0x%08x), new-ID(0x%08x)\n", + __func__, buffer_type, + ioc->unique_id[buffer_type], + diag_register->unique_id)); + + /* + * Application wants to own the buffer with + * the same size. 
+ */ + ioc->unique_id[buffer_type] = + diag_register->unique_id; + rc = 0; /* success */ + goto out; + } + } else if (ioc->unique_id[buffer_type] != + MPT3DIAGBUFFUNIQUEID) { + if (ioc->unique_id[buffer_type] != + diag_register->unique_id || + ioc->diag_buffer_sz[buffer_type] != + diag_register->requested_buffer_size || + !(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) { + ioc_err(ioc, + "%s: already has a registered buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EINVAL; + } + } else { + ioc_err(ioc, "%s: already has a registered buffer for buffer_type(0x%02x)\n", + __func__, buffer_type); + return -EINVAL; + } + } else if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) { + + if (ioc->unique_id[buffer_type] != MPT3DIAGBUFFUNIQUEID || + ioc->diag_buffer_sz[buffer_type] != + diag_register->requested_buffer_size) { + + ioc_err(ioc, + "%s: already a buffer is allocated for buffer_type(0x%02x) of size %d bytes, so please try registering again with same size\n", + __func__, buffer_type, + ioc->diag_buffer_sz[buffer_type]); + return -EINVAL; + } } if (diag_register->requested_buffer_size % 4) { @@ -1526,7 +1683,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, request_data = ioc->diag_buffer[buffer_type]; request_data_sz = diag_register->requested_buffer_size; ioc->unique_id[buffer_type] = diag_register->unique_id; - ioc->diag_buffer_status[buffer_type] = 0; + ioc->diag_buffer_status[buffer_type] &= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; memcpy(ioc->product_specific[buffer_type], diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS); ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags; @@ -1550,7 +1708,8 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, ioc_err(ioc, "%s: failed allocating memory for diag buffers, requested size(%d)\n", __func__, request_data_sz); mpt3sas_base_free_smid(ioc, smid); - return -ENOMEM; + rc = -ENOMEM; + goto out; } ioc->diag_buffer[buffer_type] = request_data; ioc->diag_buffer_sz[buffer_type] = request_data_sz; @@ -1581,10 +1740,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - sizeof(Mpi2DiagBufferPostRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); goto issue_host_reset; } @@ -1615,9 +1773,12 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc, out: - if (rc && request_data) + if (rc && request_data) { dma_free_coherent(&ioc->pdev->dev, request_data_sz, request_data, request_data_dma); + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } ioc->ctl_cmds.status = MPT3_CMD_NOT_USED; return rc; @@ -1635,6 +1796,10 @@ void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) { struct mpt3_diag_register diag_register; + u32 ret_val; + u32 trace_buff_size = ioc->manu_pg11.HostTraceBufferMaxSizeKB<<10; + u32 min_trace_buff_size = 0; + u32 decr_trace_buff_size = 0; memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); @@ -1643,10 +1808,68 @@ mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register) ioc->diag_trigger_master.MasterData = (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET); diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; - /* register for 2MB buffers */ - 
diag_register.requested_buffer_size = 2 * (1024 * 1024); - diag_register.unique_id = 0x7075900; - _ctl_diag_register_2(ioc, &diag_register); + diag_register.unique_id = + (ioc->hba_mpi_version_belonged == MPI2_VERSION) ? + (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID); + + if (trace_buff_size != 0) { + diag_register.requested_buffer_size = trace_buff_size; + min_trace_buff_size = + ioc->manu_pg11.HostTraceBufferMinSizeKB<<10; + decr_trace_buff_size = + ioc->manu_pg11.HostTraceBufferDecrementSizeKB<<10; + + if (min_trace_buff_size > trace_buff_size) { + /* The buff size is not set correctly */ + ioc_err(ioc, + "Min Trace Buff size (%d KB) greater than Max Trace Buff size (%d KB)\n", + min_trace_buff_size>>10, + trace_buff_size>>10); + ioc_err(ioc, + "Using zero Min Trace Buff Size\n"); + min_trace_buff_size = 0; + } + + if (decr_trace_buff_size == 0) { + /* + * retry the min size if decrement + * is not available. + */ + decr_trace_buff_size = + trace_buff_size - min_trace_buff_size; + } + } else { + /* register for 2MB buffers */ + diag_register.requested_buffer_size = 2 * (1024 * 1024); + } + + do { + ret_val = _ctl_diag_register_2(ioc, &diag_register); + + if (ret_val == -ENOMEM && min_trace_buff_size && + (trace_buff_size - decr_trace_buff_size) >= + min_trace_buff_size) { + /* adjust the buffer size */ + trace_buff_size -= decr_trace_buff_size; + diag_register.requested_buffer_size = + trace_buff_size; + } else + break; + } while (true); + + if (ret_val == -ENOMEM) + ioc_err(ioc, + "Cannot allocate trace buffer memory. Last memory tried = %d KB\n", + diag_register.requested_buffer_size>>10); + else if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] + & MPT3_DIAG_BUFFER_IS_REGISTERED) { + ioc_err(ioc, "Trace buffer memory %d KB allocated\n", + diag_register.requested_buffer_size>>10); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) + ioc->diag_buffer_status[ + MPI2_DIAG_BUF_TYPE_TRACE] |= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } } if (bits_to_register & 2) { @@ -1689,6 +1912,12 @@ _ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg) } rc = _ctl_diag_register_2(ioc, &karg); + + if (!rc && (ioc->diag_buffer_status[karg.buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + ioc->diag_buffer_status[karg.buffer_type] |= + MPT3_DIAG_BUFFER_IS_APP_OWNED; + return rc; } @@ -1718,7 +1947,13 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__)); - buffer_type = karg.unique_id & 0x000000ff; + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EINVAL; + } + if (!_ctl_diag_capability(ioc, buffer_type)) { ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", __func__, buffer_type); @@ -1751,12 +1986,21 @@ _ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg) return -ENOMEM; } - request_data_sz = ioc->diag_buffer_sz[buffer_type]; - request_data_dma = ioc->diag_buffer_dma[buffer_type]; - dma_free_coherent(&ioc->pdev->dev, request_data_sz, - request_data, request_data_dma); - ioc->diag_buffer[buffer_type] = NULL; - ioc->diag_buffer_status[buffer_type] = 0; + if (ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED) { + ioc->unique_id[buffer_type] = MPT3DIAGBUFFUNIQUEID; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_APP_OWNED; + ioc->diag_buffer_status[buffer_type] &= + 
~MPT3_DIAG_BUFFER_IS_REGISTERED; + } else { + request_data_sz = ioc->diag_buffer_sz[buffer_type]; + request_data_dma = ioc->diag_buffer_dma[buffer_type]; + dma_free_coherent(&ioc->pdev->dev, request_data_sz, + request_data, request_data_dma); + ioc->diag_buffer[buffer_type] = NULL; + ioc->diag_buffer_status[buffer_type] = 0; + } return 0; } @@ -1795,14 +2039,17 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) return -EPERM; } - if ((ioc->diag_buffer_status[buffer_type] & - MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { - ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", - __func__, buffer_type); - return -EINVAL; + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) { + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) { + ioc_err(ioc, "%s: buffer_type(0x%02x) is not registered\n", + __func__, buffer_type); + return -EINVAL; + } } - if (karg.unique_id & 0xffffff00) { + if (karg.unique_id) { if (karg.unique_id != ioc->unique_id[buffer_type]) { ioc_err(ioc, "%s: unique_id(0x%08x) is not registered\n", __func__, karg.unique_id); @@ -1817,13 +2064,21 @@ _ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg) return -ENOMEM; } - if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED) - karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | - MPT3_APP_FLAGS_BUFFER_VALID); - else - karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED | - MPT3_APP_FLAGS_BUFFER_VALID | - MPT3_APP_FLAGS_FW_BUFFER_ACCESS); + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_REGISTERED)) + karg.application_flags |= MPT3_APP_FLAGS_BUFFER_VALID; + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_RELEASED)) + karg.application_flags |= MPT3_APP_FLAGS_FW_BUFFER_ACCESS; + + if (!(ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED)) + karg.application_flags |= MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC; + + if ((ioc->diag_buffer_status[buffer_type] & + MPT3_DIAG_BUFFER_IS_APP_OWNED)) + karg.application_flags |= MPT3_APP_FLAGS_APP_OWNED; for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++) karg.product_specific[i] = @@ -1859,6 +2114,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, u16 ioc_status; u32 ioc_state; int rc; + u8 reset_needed = 0; dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__)); @@ -1866,6 +2122,7 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, rc = 0; *issue_reset = 0; + ioc_state = mpt3sas_base_get_iocstate(ioc, 1); if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) { if (ioc->diag_buffer_status[buffer_type] & @@ -1908,9 +2165,10 @@ mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type, MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - *issue_reset = mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - sizeof(Mpi2DiagReleaseRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagReleaseRequest_t)/4, reset_needed); + *issue_reset = reset_needed; rc = -EFAULT; goto out; } @@ -1968,7 +2226,13 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__)); - buffer_type = karg.unique_id & 0x000000ff; + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EINVAL; + } + if 
(!_ctl_diag_capability(ioc, buffer_type)) { ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", __func__, buffer_type); @@ -1992,7 +2256,7 @@ _ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg) MPT3_DIAG_BUFFER_IS_RELEASED) { ioc_err(ioc, "%s: buffer_type(0x%02x) is already released\n", __func__, buffer_type); - return 0; + return -EINVAL; } request_data = ioc->diag_buffer[buffer_type]; @@ -2052,7 +2316,13 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) dctlprintk(ioc, ioc_info(ioc, "%s\n", __func__)); - buffer_type = karg.unique_id & 0x000000ff; + buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id); + if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) { + ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n", + __func__, karg.unique_id); + return -EINVAL; + } + if (!_ctl_diag_capability(ioc, buffer_type)) { ioc_err(ioc, "%s: doesn't have capability for buffer_type(0x%02x)\n", __func__, buffer_type); @@ -2156,10 +2426,9 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) MPT3_IOCTL_DEFAULT_TIMEOUT*HZ); if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->ctl_cmds.status, mpi_request, - sizeof(Mpi2DiagBufferPostRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->ctl_cmds.status, mpi_request, + sizeof(Mpi2DiagBufferPostRequest_t)/4, issue_reset); goto issue_host_reset; } @@ -2176,6 +2445,8 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg) if (ioc_status == MPI2_IOCSTATUS_SUCCESS) { ioc->diag_buffer_status[buffer_type] |= MPT3_DIAG_BUFFER_IS_REGISTERED; + ioc->diag_buffer_status[buffer_type] &= + ~MPT3_DIAG_BUFFER_IS_RELEASED; dctlprintk(ioc, ioc_info(ioc, "%s: success\n", __func__)); } else { ioc_info(ioc, "%s: ioc_status(0x%04x) log_info(0x%08x)\n", @@ -3096,10 +3367,49 @@ host_trace_buffer_enable_store(struct device *cdev, memset(&diag_register, 0, sizeof(struct mpt3_diag_register)); ioc_info(ioc, "posting host trace buffers\n"); diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE; - diag_register.requested_buffer_size = (1024 * 1024); - diag_register.unique_id = 0x7075900; + + if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0 && + ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) { + /* post the same buffer allocated previously */ + diag_register.requested_buffer_size = + ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE]; + } else { + /* + * Free the diag buffer memory which was previously + * allocated by an application. + */ + if ((ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE] != 0) + && + (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_APP_OWNED)) { + pci_free_consistent(ioc->pdev, + ioc->diag_buffer_sz[ + MPI2_DIAG_BUF_TYPE_TRACE], + ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE], + ioc->diag_buffer_dma[ + MPI2_DIAG_BUF_TYPE_TRACE]); + ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] = + NULL; + } + + diag_register.requested_buffer_size = (1024 * 1024); + } + + diag_register.unique_id = + (ioc->hba_mpi_version_belonged == MPI2_VERSION) ? 
+ (MPT2DIAGBUFFUNIQUEID):(MPT3DIAGBUFFUNIQUEID); ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0; _ctl_diag_register_2(ioc, &diag_register); + if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & + MPT3_DIAG_BUFFER_IS_REGISTERED) { + ioc_info(ioc, + "Trace buffer %d KB allocated through sysfs\n", + diag_register.requested_buffer_size>>10); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) + ioc->diag_buffer_status[ + MPI2_DIAG_BUF_TYPE_TRACE] |= + MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED; + } } else if (!strcmp(str, "release")) { /* exit out if host buffers are already released */ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) @@ -3278,9 +3588,8 @@ diag_trigger_scsi_store(struct device *cdev, ssize_t sz; spin_lock_irqsave(&ioc->diag_trigger_lock, flags); - sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count); - memset(&ioc->diag_trigger_scsi, 0, - sizeof(struct SL_WH_EVENT_TRIGGERS_T)); + sz = min(sizeof(ioc->diag_trigger_scsi), count); + memset(&ioc->diag_trigger_scsi, 0, sizeof(ioc->diag_trigger_scsi)); memcpy(&ioc->diag_trigger_scsi, buf, sz); if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES) ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES; @@ -3349,6 +3658,125 @@ static DEVICE_ATTR_RW(diag_trigger_mpi); /*****************************************/ +/** + * drv_support_bitmap_show - driver supported feature bitmap + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +drv_support_bitmap_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "0x%08x\n", ioc->drv_support_bitmap); +} +static DEVICE_ATTR_RO(drv_support_bitmap); + +/** + * enable_sdev_max_qd_show - display whether sdev max qd is enabled/disabled + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs read/write shost attribute. This attribute is used to set the + * targets queue depth to HBA IO queue depth if this attribute is enabled. + */ +static ssize_t +enable_sdev_max_qd_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + return snprintf(buf, PAGE_SIZE, "%d\n", ioc->enable_sdev_max_qd); +} + +/** + * enable_sdev_max_qd_store - Enable/disable sdev max qd + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * A sysfs read/write shost attribute. This attribute is used to set the + * targets queue depth to HBA IO queue depth if this attribute is enabled. + * If this attribute is disabled then targets will have corresponding default + * queue depth. 
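diag_trigger_scsi_store() above is tightened so the clamp, the memset(), and the memcpy() all use sizeof(ioc->diag_trigger_scsi); previously the memset cleared a different, smaller struct type than the one bounding the copy. The safe store shape, reduced to its essentials with a stand-in struct:

#include <stdio.h>
#include <string.h>

struct scsi_triggers {          /* stand-in for SL_WH_SCSI_TRIGGERS_T */
        unsigned int valid_entries;
        unsigned int entry[10];
};

static struct scsi_triggers triggers;

/* Copy at most sizeof(destination) bytes of user input; the same
 * expression bounds the clear and the copy, so they cannot diverge. */
static size_t triggers_store(const char *buf, size_t count)
{
        size_t sz = count < sizeof(triggers) ? count : sizeof(triggers);

        memset(&triggers, 0, sizeof(triggers));
        memcpy(&triggers, buf, sz);
        return sz;
}

int main(void)
{
        char input[256] = { 4 };        /* oversized "write" from user space */

        printf("stored %zu of %d bytes\n", triggers_store(input, 256), 256);
        return 0;
}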
+ */ +static ssize_t +enable_sdev_max_qd_store(struct device *cdev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + struct MPT3SAS_DEVICE *sas_device_priv_data; + struct MPT3SAS_TARGET *sas_target_priv_data; + int val = 0; + struct scsi_device *sdev; + struct _raid_device *raid_device; + int qdepth; + + if (kstrtoint(buf, 0, &val) != 0) + return -EINVAL; + + switch (val) { + case 0: + ioc->enable_sdev_max_qd = 0; + shost_for_each_device(sdev, ioc->shost) { + sas_device_priv_data = sdev->hostdata; + if (!sas_device_priv_data) + continue; + sas_target_priv_data = sas_device_priv_data->sas_target; + if (!sas_target_priv_data) + continue; + + if (sas_target_priv_data->flags & + MPT_TARGET_FLAGS_VOLUME) { + raid_device = + mpt3sas_raid_device_find_by_handle(ioc, + sas_target_priv_data->handle); + + switch (raid_device->volume_type) { + case MPI2_RAID_VOL_TYPE_RAID0: + if (raid_device->device_info & + MPI2_SAS_DEVICE_INFO_SSP_TARGET) + qdepth = + MPT3SAS_SAS_QUEUE_DEPTH; + else + qdepth = + MPT3SAS_SATA_QUEUE_DEPTH; + break; + case MPI2_RAID_VOL_TYPE_RAID1E: + case MPI2_RAID_VOL_TYPE_RAID1: + case MPI2_RAID_VOL_TYPE_RAID10: + case MPI2_RAID_VOL_TYPE_UNKNOWN: + default: + qdepth = MPT3SAS_RAID_QUEUE_DEPTH; + } + } else if (sas_target_priv_data->flags & + MPT_TARGET_FLAGS_PCIE_DEVICE) + qdepth = MPT3SAS_NVME_QUEUE_DEPTH; + else + qdepth = MPT3SAS_SAS_QUEUE_DEPTH; + + mpt3sas_scsih_change_queue_depth(sdev, qdepth); + } + break; + case 1: + ioc->enable_sdev_max_qd = 1; + shost_for_each_device(sdev, ioc->shost) + mpt3sas_scsih_change_queue_depth(sdev, + shost->can_queue); + break; + default: + return -EINVAL; + } + + return strlen(buf); +} +static DEVICE_ATTR_RW(enable_sdev_max_qd); + struct device_attribute *mpt3sas_host_attrs[] = { &dev_attr_version_fw, &dev_attr_version_bios, @@ -3374,7 +3802,9 @@ struct device_attribute *mpt3sas_host_attrs[] = { &dev_attr_diag_trigger_event, &dev_attr_diag_trigger_scsi, &dev_attr_diag_trigger_mpi, + &dev_attr_drv_support_bitmap, &dev_attr_BRM_status, + &dev_attr_enable_sdev_max_qd, NULL, }; @@ -3548,12 +3978,6 @@ mpt3sas_ctl_exit(ushort hbas_to_enumerate) for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) { if (!ioc->diag_buffer[i]) continue; - if (!(ioc->diag_buffer_status[i] & - MPT3_DIAG_BUFFER_IS_REGISTERED)) - continue; - if ((ioc->diag_buffer_status[i] & - MPT3_DIAG_BUFFER_IS_RELEASED)) - continue; dma_free_coherent(&ioc->pdev->dev, ioc->diag_buffer_sz[i], ioc->diag_buffer[i], diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h index 18b46faef6f1..0f7aa4ddade0 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h +++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h @@ -95,6 +95,14 @@ #define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \ struct mpt3_diag_read_buffer) +/* Trace Buffer default UniqueId */ +#define MPT2DIAGBUFFUNIQUEID (0x07075900) +#define MPT3DIAGBUFFUNIQUEID (0x4252434D) + +/* UID not found */ +#define MPT3_DIAG_UID_NOT_FOUND (0xFF) + + /** * struct mpt3_ioctl_header - main header structure * @ioc_number - IOC unit number @@ -310,6 +318,7 @@ struct mpt3_ioctl_btdh_mapping { #define MPT3_APP_FLAGS_APP_OWNED (0x0001) #define MPT3_APP_FLAGS_BUFFER_VALID (0x0002) #define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004) +#define MPT3_APP_FLAGS_DYNAMIC_BUFFER_ALLOC (0x0008) /* flags for mpt3_diag_read_buffer */ #define MPT3_FLAGS_REREGISTER (0x0001) diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c 
b/drivers/scsi/mpt3sas/mpt3sas_scsih.c index 717ba0845a2a..c597d544eb39 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c +++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c @@ -51,7 +51,6 @@ #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/pci.h> -#include <linux/pci-aspm.h> #include <linux/interrupt.h> #include <linux/aer.h> #include <linux/raid_class.h> @@ -155,6 +154,10 @@ static int prot_mask = -1; module_param(prot_mask, int, 0444); MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 "); +static bool enable_sdev_max_qd; +module_param(enable_sdev_max_qd, bool, 0444); +MODULE_PARM_DESC(enable_sdev_max_qd, + "Enable sdev max qd as can_queue, def=disabled(0)"); /* raid transport support */ static struct raid_template *mpt3sas_raid_template; @@ -1047,6 +1050,34 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) } /** + * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency. + * @ioc: per adapter object + * Context: This function will acquire ioc->pcie_device_lock + * + * Update ioc->max_shutdown_latency to that NVMe drives RTD3 Entry Latency + * which has reported maximum among all available NVMe drives. + * Minimum max_shutdown_latency will be six seconds. + */ +static void +_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc) +{ + struct _pcie_device *pcie_device; + unsigned long flags; + u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; + + spin_lock_irqsave(&ioc->pcie_device_lock, flags); + list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) { + if (pcie_device->shutdown_latency) { + if (shutdown_latency < pcie_device->shutdown_latency) + shutdown_latency = + pcie_device->shutdown_latency; + } + } + ioc->max_shutdown_latency = shutdown_latency; + spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); +} + +/** * _scsih_pcie_device_remove - remove pcie_device from list. * @ioc: per adapter object * @pcie_device: the pcie_device object @@ -1060,6 +1091,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, { unsigned long flags; int was_on_pcie_device_list = 0; + u8 update_latency = 0; if (!pcie_device) return; @@ -1079,11 +1111,21 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc, list_del_init(&pcie_device->list); was_on_pcie_device_list = 1; } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); if (was_on_pcie_device_list) { kfree(pcie_device->serial_number); pcie_device_put(pcie_device); } + + /* + * This device's RTD3 Entry Latency matches IOC's + * max_shutdown_latency. Recalculate IOC's max_shutdown_latency + * from the available drives as current drive is getting removed. + */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); } @@ -1098,6 +1140,7 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) struct _pcie_device *pcie_device; unsigned long flags; int was_on_pcie_device_list = 0; + u8 update_latency = 0; if (ioc->shost_recovery) return; @@ -1110,12 +1153,22 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle) was_on_pcie_device_list = 1; pcie_device_put(pcie_device); } + if (pcie_device->shutdown_latency == ioc->max_shutdown_latency) + update_latency = 1; } spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); if (was_on_pcie_device_list) { _scsih_pcie_device_remove_from_sml(ioc, pcie_device); pcie_device_put(pcie_device); } + + /* + * This device's RTD3 Entry Latency matches IOC's + * max_shutdown_latency. 
Recalculate IOC's max_shutdown_latency + * from the available drives as current drive is getting removed. + */ + if (update_latency) + _scsih_set_nvme_max_shutdown_latency(ioc); } /** @@ -1152,6 +1205,11 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc, list_add_tail(&pcie_device->list, &ioc->pcie_device_list); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); + if (pcie_device->access_status == + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { + clear_bit(pcie_device->handle, ioc->pend_os_device_add); + return; + } if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) { _scsih_pcie_device_remove(ioc, pcie_device); } else if (!pcie_device->starget) { @@ -1196,7 +1254,9 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc, spin_lock_irqsave(&ioc->pcie_device_lock, flags); pcie_device_get(pcie_device); list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list); - _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL); + if (pcie_device->access_status != + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) + _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); } /** @@ -1433,17 +1493,20 @@ _scsih_is_end_device(u32 device_info) } /** - * _scsih_is_nvme_device - determines if device is an nvme device + * _scsih_is_nvme_pciescsi_device - determines if + * device is an pcie nvme/scsi device * @device_info: bitfield providing information about the device. * Context: none * - * Return: 1 if nvme device. + * Returns 1 if device is pcie device type nvme/scsi. */ static int -_scsih_is_nvme_device(u32 device_info) +_scsih_is_nvme_pciescsi_device(u32 device_info) { - if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) - == MPI26_PCIE_DEVINFO_NVME) + if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) + == MPI26_PCIE_DEVINFO_NVME) || + ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE) + == MPI26_PCIE_DEVINFO_SCSI)) return 1; else return 0; @@ -1509,7 +1572,13 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) max_depth = shost->can_queue; - /* limit max device queue for SATA to 32 */ + /* + * limit max device queue for SATA to 32 if enable_sdev_max_qd + * is disabled. + */ + if (ioc->enable_sdev_max_qd) + goto not_sata; + sas_device_priv_data = sdev->hostdata; if (!sas_device_priv_data) goto not_sata; @@ -1535,7 +1604,31 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) max_depth = 1; if (qdepth > max_depth) qdepth = max_depth; - return scsi_change_queue_depth(sdev, qdepth); + scsi_change_queue_depth(sdev, qdepth); + sdev_printk(KERN_INFO, sdev, + "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n", + sdev->queue_depth, sdev->tagged_supported, + sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1)); + return sdev->queue_depth; +} + +/** + * mpt3sas_scsih_change_queue_depth - setting device queue depth + * @sdev: scsi device struct + * @qdepth: requested queue depth + * + * Returns nothing. 
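_scsih_set_nvme_max_shutdown_latency() above recomputes the adapter-wide shutdown timeout as the maximum RTD3 entry latency across the remaining NVMe drives, never dropping below the 6-second IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT floor; the removal paths trigger it only when the departing drive was the one holding the maximum. The recompute itself, modeled over a plain array instead of the locked pcie_device_list:

#include <stdio.h>

#define SHUTDOWN_TIMEOUT_FLOOR 6        /* IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT */

/* Max RTD3 entry latency over the remaining drives, floored at 6 s. */
static unsigned short max_shutdown_latency(const unsigned short *lat, int n)
{
        unsigned short max = SHUTDOWN_TIMEOUT_FLOOR;
        int i;

        for (i = 0; i < n; i++) {
                if (lat[i] > max)
                        max = lat[i];
        }
        return max;
}

int main(void)
{
        unsigned short latencies[] = { 2, 10, 4 };

        printf("with the 10 s drive: %u s\n",
               max_shutdown_latency(latencies, 3));     /* 10 */
        latencies[1] = 0;       /* the drive holding the max was removed */
        printf("after its removal:   %u s\n",
               max_shutdown_latency(latencies, 3));     /* 6 (floor) */
        return 0;
}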
+ */ +void +mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth) +{ + struct Scsi_Host *shost = sdev->host; + struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); + + if (ioc->enable_sdev_max_qd) + qdepth = shost->can_queue; + + scsih_change_queue_depth(sdev, qdepth); } /** @@ -2296,7 +2389,7 @@ scsih_slave_configure(struct scsi_device *sdev) MPT3SAS_RAID_MAX_SECTORS); } - scsih_change_queue_depth(sdev, qdepth); + mpt3sas_scsih_change_queue_depth(sdev, qdepth); /* raid transport support */ if (!ioc->is_warpdrive) @@ -2360,7 +2453,7 @@ scsih_slave_configure(struct scsi_device *sdev) pcie_device_put(pcie_device); spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); - scsih_change_queue_depth(sdev, qdepth); + mpt3sas_scsih_change_queue_depth(sdev, qdepth); /* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be ** merged and can eliminate holes created during merging ** operation. @@ -2420,7 +2513,7 @@ scsih_slave_configure(struct scsi_device *sdev) _scsih_display_sata_capabilities(ioc, handle, sdev); - scsih_change_queue_depth(sdev, qdepth); + mpt3sas_scsih_change_queue_depth(sdev, qdepth); if (ssp_target) { sas_read_port_mode_page(sdev); @@ -2635,6 +2728,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, u16 smid = 0; u32 ioc_state; int rc; + u8 issue_reset = 0; lockdep_assert_held(&ioc->tm_cmds.mutex); @@ -2657,7 +2751,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, } if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { - mpt3sas_base_fault_info(ioc, ioc_state & + mpt3sas_print_fault_code(ioc, ioc_state & + MPI2_DOORBELL_DATA_MASK); + rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); + return (!rc) ? SUCCESS : FAILED; + } else if ((ioc_state & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, ioc_state & MPI2_DOORBELL_DATA_MASK); rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); return (!rc) ? SUCCESS : FAILED; @@ -2688,9 +2788,10 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun, ioc->put_smid_hi_priority(ioc, smid, msix_task); wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ); if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) { - if (mpt3sas_base_check_cmd_timeout(ioc, - ioc->tm_cmds.status, mpi_request, - sizeof(Mpi2SCSITaskManagementRequest_t)/4)) { + mpt3sas_check_cmd_timeout(ioc, + ioc->tm_cmds.status, mpi_request, + sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset); + if (issue_reset) { rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); rc = (!rc) ? SUCCESS : FAILED; @@ -2837,15 +2938,17 @@ scsih_abort(struct scsi_cmnd *scmd) u8 timeout = 30; struct _pcie_device *pcie_device = NULL; - sdev_printk(KERN_INFO, scmd->device, - "attempting task abort! scmd(%p)\n", scmd); + sdev_printk(KERN_INFO, scmd->device, "attempting task abort!" + "scmd(0x%p), outstanding for %u ms & timeout %u ms\n", + scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc), + (scmd->request->timeout / HZ) * 1000); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target || ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, - "device been deleted! scmd(%p)\n", scmd); + "device been deleted! 
scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -2854,6 +2957,8 @@ scsih_abort(struct scsi_cmnd *scmd) /* check for completed command */ if (st == NULL || st->cb_idx == 0xFF) { + sdev_printk(KERN_INFO, scmd->device, "No reference found at " + "driver, assuming scmd(0x%p) might have completed\n", scmd); scmd->result = DID_RESET << 16; r = SUCCESS; goto out; @@ -2872,7 +2977,8 @@ scsih_abort(struct scsi_cmnd *scmd) handle = sas_device_priv_data->sas_target->handle; pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); - if (pcie_device && (!ioc->tm_custom_handling)) + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) timeout = ioc->nvme_abort_timeout; r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, @@ -2881,7 +2987,7 @@ scsih_abort(struct scsi_cmnd *scmd) if (r == SUCCESS && st->cb_idx != 0xFF) r = FAILED; out: - sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (pcie_device) pcie_device_put(pcie_device); @@ -2910,14 +3016,14 @@ scsih_dev_reset(struct scsi_cmnd *scmd) struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; sdev_printk(KERN_INFO, scmd->device, - "attempting device reset! scmd(%p)\n", scmd); + "attempting device reset! scmd(0x%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target || ioc->remove_host) { sdev_printk(KERN_INFO, scmd->device, - "device been deleted! scmd(%p)\n", scmd); + "device been deleted! scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -2943,11 +3049,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd) pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); - if (pcie_device && (!ioc->tm_custom_handling)) { + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { tr_timeout = pcie_device->reset_timeout; tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; } else tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET; + r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0, tr_timeout, tr_method); @@ -2955,7 +3063,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd) if (r == SUCCESS && atomic_read(&scmd->device->device_busy)) r = FAILED; out: - sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n", + sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (sas_device) @@ -2986,15 +3094,15 @@ scsih_target_reset(struct scsi_cmnd *scmd) struct scsi_target *starget = scmd->device->sdev_target; struct MPT3SAS_TARGET *target_priv_data = starget->hostdata; - starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n", - scmd); + starget_printk(KERN_INFO, starget, + "attempting target reset! scmd(0x%p)\n", scmd); _scsih_tm_display_info(ioc, scmd); sas_device_priv_data = scmd->device->hostdata; if (!sas_device_priv_data || !sas_device_priv_data->sas_target || ioc->remove_host) { - starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n", - scmd); + starget_printk(KERN_INFO, starget, + "target been deleted! 
scmd(0x%p)\n", scmd); scmd->result = DID_NO_CONNECT << 16; scmd->scsi_done(scmd); r = SUCCESS; @@ -3020,7 +3128,8 @@ scsih_target_reset(struct scsi_cmnd *scmd) pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle); - if (pcie_device && (!ioc->tm_custom_handling)) { + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) { tr_timeout = pcie_device->reset_timeout; tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; } else @@ -3032,7 +3141,7 @@ scsih_target_reset(struct scsi_cmnd *scmd) if (r == SUCCESS && atomic_read(&starget->target_busy)) r = FAILED; out: - starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n", + starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n", ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd); if (sas_device) @@ -3055,7 +3164,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host); int r, retval; - ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd); + ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd); scsi_print_command(scmd); if (ioc->is_driver_loading || ioc->remove_host) { @@ -3067,7 +3176,7 @@ scsih_host_reset(struct scsi_cmnd *scmd) retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); r = (retval < 0) ? FAILED : SUCCESS; out: - ioc_info(ioc, "host reset: %s scmd(%p)\n", + ioc_info(ioc, "host reset: %s scmd(0x%p)\n", r == SUCCESS ? "SUCCESS" : "FAILED", scmd); return r; @@ -3598,7 +3707,9 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle) sas_address = pcie_device->wwid; } spin_unlock_irqrestore(&ioc->pcie_device_lock, flags); - if (pcie_device && (!ioc->tm_custom_handling)) + if (pcie_device && (!ioc->tm_custom_handling) && + (!(mpt3sas_scsih_is_pcie_scsi_device( + pcie_device->device_info)))) tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE; else @@ -4431,6 +4542,7 @@ static void _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, Mpi2EventDataTemperature_t *event_data) { + u32 doorbell; if (ioc->temp_sensors_count >= event_data->SensorNum) { ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n", le16_to_cpu(event_data->Status) & 0x1 ? 
"0 " : " ", @@ -4440,6 +4552,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc, event_data->SensorNum); ioc_err(ioc, "Current Temp In Celsius: %d\n", event_data->CurrentTemperature); + if (ioc->hba_mpi_version_belonged != MPI2_VERSION) { + doorbell = mpt3sas_base_get_iocstate(ioc, 0); + if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_FAULT) { + mpt3sas_print_fault_code(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + } else if ((doorbell & MPI2_IOC_STATE_MASK) == + MPI2_IOC_STATE_COREDUMP) { + mpt3sas_print_coredump_info(ioc, + doorbell & MPI2_DOORBELL_DATA_MASK); + } + } } } @@ -4654,11 +4778,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd) * since we're lockless at this point */ do { - if (test_bit(0, &sas_device_priv_data->ata_command_pending)) { - scmd->result = SAM_STAT_BUSY; - scmd->scsi_done(scmd); - return 0; - } + if (test_bit(0, &sas_device_priv_data->ata_command_pending)) + return SCSI_MLQUEUE_DEVICE_BUSY; } while (_scsih_set_satl_pending(scmd, true)); if (scmd->sc_data_direction == DMA_FROM_DEVICE) @@ -5120,7 +5241,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle) /* insert into event log */ sz = offsetof(Mpi2EventNotificationReply_t, EventData) + sizeof(Mpi2EventDataSasDeviceStatusChange_t); - event_reply = kzalloc(sz, GFP_KERNEL); + event_reply = kzalloc(sz, GFP_ATOMIC); if (!event_reply) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); @@ -6456,24 +6577,17 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc, /** * _scsih_sas_device_status_change_event - handle device status change * @ioc: per adapter object - * @fw_event: The fw_event_work object + * @event_data: The fw event * Context: user. */ static void _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, - struct fw_event_work *fw_event) + Mpi2EventDataSasDeviceStatusChange_t *event_data) { struct MPT3SAS_TARGET *target_priv_data; struct _sas_device *sas_device; u64 sas_address; unsigned long flags; - Mpi2EventDataSasDeviceStatusChange_t *event_data = - (Mpi2EventDataSasDeviceStatusChange_t *) - fw_event->event_data; - - if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) - _scsih_sas_device_status_change_event_debug(ioc, - event_data); /* In MPI Revision K (0xC), the internal device reset complete was * implemented, so avoid setting tm_busy flag for older firmware. @@ -6505,6 +6619,12 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc, else target_priv_data->tm_busy = 0; + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + ioc_info(ioc, + "%s tm_busy flag for handle(0x%04x)\n", + (target_priv_data->tm_busy == 1) ? 
"Enable" : "Disable", + target_priv_data->handle); + out: if (sas_device) sas_device_put(sas_device); @@ -6539,6 +6659,11 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid, break; case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED: desc = "PCIe device blocked"; + ioc_info(ioc, + "Device with Access Status (%s): wwid(0x%016llx), " + "handle(0x%04x)\n ll only be added to the internal list", + desc, (u64)wwid, handle); + rc = 0; break; case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED: desc = "PCIe device mem space access failed"; @@ -6643,7 +6768,8 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc, pcie_device->enclosure_level, pcie_device->connector_name); - if (pcie_device->starget) + if (pcie_device->starget && (pcie_device->access_status != + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)) scsi_remove_target(&pcie_device->starget->dev); dewtprintk(ioc, ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", @@ -6694,7 +6820,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) /* check if this is end device */ device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); - if (!(_scsih_is_nvme_device(device_info))) + if (!(_scsih_is_nvme_pciescsi_device(device_info))) return; wwid = le64_to_cpu(pcie_device_pg0.WWID); @@ -6709,6 +6835,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) if (unlikely(pcie_device->handle != handle)) { starget = pcie_device->starget; sas_target_priv_data = starget->hostdata; + pcie_device->access_status = pcie_device_pg0.AccessStatus; starget_printk(KERN_INFO, starget, "handle changed from(0x%04x) to (0x%04x)!!!\n", pcie_device->handle, handle); @@ -6803,7 +6930,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) pcie_device_pg0.AccessStatus)) return 0; - if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo)))) + if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu + (pcie_device_pg0.DeviceInfo)))) return 0; pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid); @@ -6813,6 +6941,31 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) return 0; } + /* PCIe Device Page 2 contains read-only information about a + * specific NVMe device; therefore, this page is only + * valid for NVMe devices and skip for pcie devices of type scsi. 
+ */ + if (!(mpt3sas_scsih_is_pcie_scsi_device( + le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { + if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, + &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, + handle)) { + ioc_err(ioc, + "failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 0; + } + + ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & + MPI2_IOCSTATUS_MASK; + if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { + ioc_err(ioc, + "failure at %s:%d/%s()!\n", __FILE__, + __LINE__, __func__); + return 0; + } + } + pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL); if (!pcie_device) { ioc_err(ioc, "failure at %s:%d/%s()!\n", @@ -6824,6 +6977,7 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) pcie_device->id = ioc->pcie_target_id++; pcie_device->channel = PCIE_CHANNEL; pcie_device->handle = handle; + pcie_device->access_status = pcie_device_pg0.AccessStatus; pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); pcie_device->wwid = wwid; pcie_device->port_num = pcie_device_pg0.PortNum; @@ -6855,27 +7009,26 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle) le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID); } /* TODO -- Add device name once FW supports it */ - if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply, - &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); - kfree(pcie_device); - return 0; - } - - ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK; - if (ioc_status != MPI2_IOCSTATUS_SUCCESS) { - ioc_err(ioc, "failure at %s:%d/%s()!\n", - __FILE__, __LINE__, __func__); - kfree(pcie_device); - return 0; - } - pcie_device->nvme_mdts = - le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); - if (pcie_device_pg2.ControllerResetTO) - pcie_device->reset_timeout = - pcie_device_pg2.ControllerResetTO; - else + if (!(mpt3sas_scsih_is_pcie_scsi_device( + le32_to_cpu(pcie_device_pg0.DeviceInfo)))) { + pcie_device->nvme_mdts = + le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize); + pcie_device->shutdown_latency = + le16_to_cpu(pcie_device_pg2.ShutdownLatency); + /* + * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency + * if drive's RTD3 Entry Latency is greater then IOC's + * max_shutdown_latency. 
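The add path above only consults PCIe Device Page 2 for NVMe devices: it records the transfer size and shutdown latency, raises the adapter-wide maximum when the new drive reports a larger RTD3 entry latency, and falls back to a 30 second reset timeout, which is also all a PCIe SCSI device gets. A hedged userspace sketch of that policy with made-up type names:

#include <stdio.h>
#include <stdbool.h>

struct pg2_info {                    /* the page 2 fields the driver reads */
    unsigned int max_transfer;       /* MaximumDataTransferSize */
    unsigned short shutdown_latency; /* ShutdownLatency */
    unsigned char reset_to;          /* ControllerResetTO */
};

struct dev_cfg {
    unsigned int nvme_mdts;
    unsigned short shutdown_latency;
    unsigned char reset_timeout;
};

static void apply_pg2(struct dev_cfg *dev, const struct pg2_info *pg2,
                      bool is_nvme, unsigned short *ioc_max_latency)
{
    if (is_nvme) {
        dev->nvme_mdts = pg2->max_transfer;
        dev->shutdown_latency = pg2->shutdown_latency;
        if (dev->shutdown_latency > *ioc_max_latency)
            *ioc_max_latency = dev->shutdown_latency; /* raise the running max */
        dev->reset_timeout = pg2->reset_to ? pg2->reset_to : 30;
    } else {
        dev->reset_timeout = 30; /* PCIe SCSI device: page 2 is skipped */
    }
}

int main(void)
{
    struct pg2_info pg2 = { 131072, 10, 0 };
    struct dev_cfg dev = { 0, 0, 0 };
    unsigned short ioc_max = 6; /* the six second floor */

    apply_pg2(&dev, &pg2, true, &ioc_max);
    printf("ioc max latency %d, reset timeout %d\n",
           ioc_max, dev.reset_timeout); /* 10, 30 */
    return 0;
}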
+ */ + if (pcie_device->shutdown_latency > ioc->max_shutdown_latency) + ioc->max_shutdown_latency = + pcie_device->shutdown_latency; + if (pcie_device_pg2.ControllerResetTO) + pcie_device->reset_timeout = + pcie_device_pg2.ControllerResetTO; + else + pcie_device->reset_timeout = 30; + } else pcie_device->reset_timeout = 30; if (ioc->wait_for_discovery_to_complete) @@ -7606,10 +7759,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num) wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ); if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { - issue_reset = - mpt3sas_base_check_cmd_timeout(ioc, - ioc->scsih_cmds.status, mpi_request, - sizeof(Mpi2RaidActionRequest_t)/4); + mpt3sas_check_cmd_timeout(ioc, + ioc->scsih_cmds.status, mpi_request, + sizeof(Mpi2RaidActionRequest_t)/4, issue_reset); rc = -EFAULT; goto out; } @@ -8507,6 +8659,8 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc, if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID)) && (pcie_device->slot == le16_to_cpu( pcie_device_pg0->Slot))) { + pcie_device->access_status = + pcie_device_pg0->AccessStatus; pcie_device->responding = 1; starget = pcie_device->starget; if (starget && starget->hostdata) { @@ -8594,7 +8748,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc) } handle = le16_to_cpu(pcie_device_pg0.DevHandle); device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo); - if (!(_scsih_is_nvme_device(device_info))) + if (!(_scsih_is_nvme_pciescsi_device(device_info))) continue; _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0); } @@ -9175,7 +9329,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc) break; } handle = le16_to_cpu(pcie_device_pg0.DevHandle); - if (!(_scsih_is_nvme_device( + if (!(_scsih_is_nvme_pciescsi_device( le32_to_cpu(pcie_device_pg0.DeviceInfo)))) continue; pcie_device = mpt3sas_get_pdev_by_wwid(ioc, @@ -9207,15 +9361,17 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc) } /** - * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih) + * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding + * scsi & tm cmds. * @ioc: per adapter object * * The handler for doing any required cleanup or initialization. 
*/ void -mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc) +mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc) { - dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__)); + dtmprintk(ioc, + ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__)); if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) { ioc->scsih_cmds.status |= MPT3_CMD_RESET; mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid); @@ -9292,6 +9448,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) } _scsih_remove_unresponding_devices(ioc); _scsih_scan_for_devices_after_reset(ioc); + _scsih_set_nvme_max_shutdown_latency(ioc); break; case MPT3SAS_PORT_ENABLE_COMPLETE: ioc->start_scan = 0; @@ -9308,7 +9465,10 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event) _scsih_sas_topology_change_event(ioc, fw_event); break; case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: - _scsih_sas_device_status_change_event(ioc, fw_event); + if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) + _scsih_sas_device_status_change_event_debug(ioc, + (Mpi2EventDataSasDeviceStatusChange_t *) + fw_event->event_data); break; case MPI2_EVENT_SAS_DISCOVERY: _scsih_sas_discovery_event(ioc, fw_event); @@ -9481,6 +9641,10 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, break; } case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE: + _scsih_sas_device_status_change_event(ioc, + (Mpi2EventDataSasDeviceStatusChange_t *) + mpi_reply->EventData); + break; case MPI2_EVENT_IR_OPERATION_STATUS: case MPI2_EVENT_SAS_DISCOVERY: case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR: @@ -9588,6 +9752,75 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc, } /** + * _scsih_nvme_shutdown - NVMe shutdown notification + * @ioc: per adapter object + * + * Sending IoUnitControl request with shutdown operation code to alert IOC that + * the host system is shutting down so that IOC can issue NVMe shutdown to + * NVMe drives attached to it. + */ +static void +_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc) +{ + Mpi26IoUnitControlRequest_t *mpi_request; + Mpi26IoUnitControlReply_t *mpi_reply; + u16 smid; + + /* are there any NVMe devices ? 
*/ + if (list_empty(&ioc->pcie_device_list)) + return; + + mutex_lock(&ioc->scsih_cmds.mutex); + + if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) { + ioc_err(ioc, "%s: scsih_cmd in use\n", __func__); + goto out; + } + + ioc->scsih_cmds.status = MPT3_CMD_PENDING; + + smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx); + if (!smid) { + ioc_err(ioc, + "%s: failed obtaining a smid\n", __func__); + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + goto out; + } + + mpi_request = mpt3sas_base_get_msg_frame(ioc, smid); + ioc->scsih_cmds.smid = smid; + memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t)); + mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL; + mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN; + + init_completion(&ioc->scsih_cmds.done); + ioc->put_smid_default(ioc, smid); + /* Wait for max_shutdown_latency seconds */ + ioc_info(ioc, + "Io Unit Control shutdown (sending), Shutdown latency %d sec\n", + ioc->max_shutdown_latency); + wait_for_completion_timeout(&ioc->scsih_cmds.done, + ioc->max_shutdown_latency*HZ); + + if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) { + ioc_err(ioc, "%s: timeout\n", __func__); + goto out; + } + + if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) { + mpi_reply = ioc->scsih_cmds.reply; + ioc_info(ioc, "Io Unit Control shutdown (complete):" + "ioc_status(0x%04x), loginfo(0x%08x)\n", + le16_to_cpu(mpi_reply->IOCStatus), + le32_to_cpu(mpi_reply->IOCLogInfo)); + } + out: + ioc->scsih_cmds.status = MPT3_CMD_NOT_USED; + mutex_unlock(&ioc->scsih_cmds.mutex); +} + + +/** * _scsih_ir_shutdown - IR shutdown notification * @ioc: per adapter object * @@ -9779,6 +10012,7 @@ scsih_shutdown(struct pci_dev *pdev) &ioc->ioc_pg1_copy); _scsih_ir_shutdown(ioc); + _scsih_nvme_shutdown(ioc); mpt3sas_base_detach(ioc); } @@ -10039,6 +10273,12 @@ _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc) pcie_device_put(pcie_device); continue; } + if (pcie_device->access_status == + MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) { + pcie_device_make_active(ioc, pcie_device); + pcie_device_put(pcie_device); + continue; + } rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0); if (rc) { @@ -10115,6 +10355,8 @@ scsih_scan_start(struct Scsi_Host *shost) int rc; if (diag_buffer_enable != -1 && diag_buffer_enable != 0) mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable); + else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0) + mpt3sas_enable_diag_buffer(ioc, 1); if (disable_discovery > 0) return; @@ -10453,6 +10695,15 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx; ioc->logging_level = logging_level; ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds; + /* Host waits for minimum of six seconds */ + ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT; + /* + * Enable MEMORY MOVE support flag. 
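The scan_start hunk above adds a default: an explicit diag_buffer_enable module parameter still wins, but when Manufacturing Page 11 reports a nonzero HostTraceBufferMaxSizeKB the trace buffer is now enabled automatically. A small userspace sketch of that decision, with illustrative names:

#include <stdio.h>

/* param mirrors the diag_buffer_enable module parameter (-1 = unset),
 * max_size_kb mirrors Manufacturing Page 11's HostTraceBufferMaxSizeKB. */
static int choose_diag_buffer(int param, unsigned int max_size_kb)
{
    if (param != -1 && param != 0)
        return param; /* an explicit request always wins */
    if (max_size_kb != 0)
        return 1;     /* firmware sized a trace buffer: enable it */
    return 0;         /* leave diag buffers off */
}

int main(void)
{
    printf("%d\n", choose_diag_buffer(-1, 2048)); /* 1 */
    printf("%d\n", choose_diag_buffer(2, 0));     /* 2 */
    printf("%d\n", choose_diag_buffer(-1, 0));    /* 0 */
    return 0;
}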
+ */ + ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE; + + ioc->enable_sdev_max_qd = enable_sdev_max_qd; + /* misc semaphores and spin locks */ mutex_init(&ioc->reset_in_progress_mutex); /* initializing pci_access_mutex lock */ @@ -10594,6 +10845,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state) mpt3sas_base_stop_watchdog(ioc); flush_scheduled_work(); scsi_block_requests(shost); + _scsih_nvme_shutdown(ioc); device_state = pci_choose_state(pdev, state); ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n", pdev, pci_name(pdev), device_state); @@ -10628,7 +10880,7 @@ scsih_resume(struct pci_dev *pdev) r = mpt3sas_base_map_resources(ioc); if (r) return r; - + ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n"); mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET); scsi_unblock_requests(shost); mpt3sas_base_start_watchdog(ioc); @@ -10697,6 +10949,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev) if (rc) return PCI_ERS_RESULT_DISCONNECT; + ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n"); rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER); ioc_warn(ioc, "hard reset: %s\n", diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index 5324662751bf..6ec5b7f33dfd 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -719,11 +719,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, sas_device_put(sas_device); } - if ((ioc->logging_level & MPT_DEBUG_TRANSPORT)) - dev_printk(KERN_INFO, &rphy->dev, - "add: handle(0x%04x), sas_addr(0x%016llx)\n", - handle, (unsigned long long) - mpt3sas_port->remote_identify.sas_address); + dev_info(&rphy->dev, + "add: handle(0x%04x), sas_addr(0x%016llx)\n", handle, + (unsigned long long)mpt3sas_port->remote_identify.sas_address); + mpt3sas_port->rphy = rphy; spin_lock_irqsave(&ioc->sas_node_lock, flags); list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list); @@ -813,6 +812,8 @@ mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address, } if (!ioc->remove_host) sas_port_delete(mpt3sas_port->port); + ioc_info(ioc, "%s: removed: sas_addr(0x%016llx)\n", + __func__, (unsigned long long)sas_address); kfree(mpt3sas_port); } diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c index 6ac453fd5937..8ec9bab20ec4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c +++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c @@ -113,15 +113,21 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc, struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data) { u8 issue_reset = 0; + u32 *trig_data = (u32 *)&event_data->u.master; dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: enter\n", __func__)); /* release the diag buffer trace */ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] & MPT3_DIAG_BUFFER_IS_RELEASED) == 0) { - dTriggerDiagPrintk(ioc, - ioc_info(ioc, "%s: release trace diag buffer\n", - __func__)); + /* + * add a log message so that user knows which event caused + * the release + */ + ioc_info(ioc, + "%s: Releasing the trace buffer. 
Trigger_Type 0x%08x, Data[0] 0x%08x, Data[1] 0x%08x\n", + __func__, event_data->trigger_type, + trig_data[0], trig_data[1]); mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE, &issue_reset); } diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index da719b0694dc..7af9173c4925 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -47,6 +47,9 @@ static struct scsi_host_template mvs_sht = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = mvst_host_attrs, .track_queue_depth = 1, }; diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 3e0b8ebe257f..a920eced92ec 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1541,7 +1541,7 @@ out: int mvs_abort_task_set(struct domain_device *dev, u8 *lun) { - int rc = TMF_RESP_FUNC_FAILED; + int rc; struct mvs_tmf_task tmf_task; tmf_task.tmf = TMF_ABORT_TASK_SET; diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c index 539ac8ce4fcd..d4bd31a75b9d 100644 --- a/drivers/scsi/myrb.c +++ b/drivers/scsi/myrb.c @@ -3531,7 +3531,7 @@ static struct myrb_hba *myrb_detect(struct pci_dev *pdev, spin_lock_init(&cb->queue_lock); if (mmio_size < PAGE_SIZE) mmio_size = PAGE_SIZE; - cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size); + cb->mmio_base = ioremap(cb->pci_addr & PAGE_MASK, mmio_size); if (cb->mmio_base == NULL) { dev_err(&pdev->dev, "Unable to map Controller Register Window\n"); diff --git a/drivers/scsi/myrb.h b/drivers/scsi/myrb.h index 9289c19fcb2f..fb8eacfceee8 100644 --- a/drivers/scsi/myrb.h +++ b/drivers/scsi/myrb.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* SPDX-License-Identifier: GPL-2.0 */ +/* * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers * * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com> diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c index eb0dd566330a..5c5666491c2e 100644 --- a/drivers/scsi/myrs.c +++ b/drivers/scsi/myrs.c @@ -2311,7 +2311,7 @@ static struct myrs_hba *myrs_detect(struct pci_dev *pdev, /* Map the Controller Register Window. */ if (mmio_size < PAGE_SIZE) mmio_size = PAGE_SIZE; - cs->mmio_base = ioremap_nocache(cs->pci_addr & PAGE_MASK, mmio_size); + cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size); if (cs->mmio_base == NULL) { dev_err(&pdev->dev, "Unable to map Controller Register Window\n"); diff --git a/drivers/scsi/myrs.h b/drivers/scsi/myrs.h index e6702ee85e9f..9f6696d0ddd5 100644 --- a/drivers/scsi/myrs.h +++ b/drivers/scsi/myrs.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * +/* SPDX-License-Identifier: GPL-2.0 */ +/* * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers * * This driver supports the newer, SCSI-based firmware interface only. diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c index e6a95498ac0d..11a2cb844ecb 100644 --- a/drivers/scsi/ncr53c8xx.c +++ b/drivers/scsi/ncr53c8xx.c @@ -1722,7 +1722,7 @@ struct ncb { ** Miscellaneous configuration and status parameters. 
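The myrb, myrs and nsp_cs hunks above belong to the tree-wide move from ioremap_nocache() to plain ioremap(), which now always produces an uncached MMIO mapping, so the _nocache spelling is redundant. A minimal kernel-style fragment of the surviving pattern; this assumes a driver context and is a sketch, not a standalone program:

#include <linux/errno.h>
#include <linux/io.h>

static void __iomem *mmio_base;

static int map_controller_regs(unsigned long phys_addr, unsigned long size)
{
    mmio_base = ioremap(phys_addr, size); /* uncached by default now */
    if (!mmio_base)
        return -ENOMEM;
    return 0;
}

static void unmap_controller_regs(void)
{
    iounmap(mmio_base);
}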
**---------------------------------------------------------------- */ - u_char disc; /* Diconnection allowed */ + u_char disc; /* Disconnection allowed */ u_char scsi_mode; /* Current SCSI BUS mode */ u_char order; /* Tag order to use */ u_char verbose; /* Verbosity for this controller*/ @@ -3910,11 +3910,14 @@ static void __init ncr_prepare_setting(struct ncb *np) np->scsi_mode = SMODE_HVD; break; } + /* fall through */ case 3: /* SYMBIOS controllers report HVD through GPIO3 */ if (INB(nc_gpreg) & 0x08) break; + /* fall through */ case 2: /* Set HVD unconditionally */ np->scsi_mode = SMODE_HVD; + /* fall through */ case 1: /* Trust previous settings for HVD */ if (np->sv_stest2 & 0x20) np->scsi_mode = SMODE_HVD; @@ -6714,6 +6717,7 @@ void ncr_int_sir (struct ncb *np) OUTL_DSP (scr_to_cpu(tp->lp[0]->jump_ccb[0])); return; } + /* fall through */ case SIR_RESEL_BAD_TARGET: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_LUN: /* Will send a TARGET RESET message */ case SIR_RESEL_BAD_I_T_L_Q: /* Will send an ABORT TAG message */ diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index 70db79254155..b6e04d14292d 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -1542,7 +1542,7 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt) * with ACK reply when below condition is matched: * MsgIn 00: Command Complete. * MsgIn 02: Save Data Pointer. - * MsgIn 04: Diconnect. + * MsgIn 04: Disconnect. * In other case, unexpected BUSFREE is detected. */ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph) diff --git a/drivers/scsi/pcmcia/Kconfig b/drivers/scsi/pcmcia/Kconfig index 2368f34efba3..dc9b74c9348a 100644 --- a/drivers/scsi/pcmcia/Kconfig +++ b/drivers/scsi/pcmcia/Kconfig @@ -32,7 +32,7 @@ config PCMCIA_FDOMAIN config PCMCIA_NINJA_SCSI tristate "NinjaSCSI-3 / NinjaSCSI-32Bi (16bit) PCMCIA support" - depends on !64BIT + depends on !64BIT || COMPILE_TEST help If you intend to attach this type of PCMCIA SCSI host adapter to your computer, say Y here and read diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c index 97416e1dcc5b..d79ce97a04bd 100644 --- a/drivers/scsi/pcmcia/nsp_cs.c +++ b/drivers/scsi/pcmcia/nsp_cs.c @@ -56,9 +56,7 @@ MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>"); MODULE_DESCRIPTION("WorkBit NinjaSCSI-3 / NinjaSCSI-32Bi(16bit) PCMCIA SCSI host adapter module"); MODULE_SUPPORTED_DEVICE("sd,sr,sg,st"); -#ifdef MODULE_LICENSE MODULE_LICENSE("GPL"); -#endif #include "nsp_io.h" @@ -1562,7 +1560,7 @@ static int nsp_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) goto next_entry; data->MmioAddress = (unsigned long) - ioremap_nocache(p_dev->resource[2]->start, + ioremap(p_dev->resource[2]->start, resource_size(p_dev->resource[2])); data->MmioLength = resource_size(p_dev->resource[2]); } diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 6b85016b4db3..7c6be2ec110d 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -70,6 +70,25 @@ static DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); /** + * controller_fatal_error_show - check controller is under fatal err + * @cdev: pointer to embedded class device + * @buf: the buffer returned + * + * A sysfs 'read only' shost attribute. 
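The DEVICE_ATTR_RO(controller_fatal_error) declaration just below leans on the sysfs convention that a matching <name>_show() routine exists and returns the byte count written into buf. A hedged minimal shape of such a read-only attribute; the demo_* names are invented for this sketch, and the real handler digs the flag out of the Scsi_Host private data:

#include <linux/device.h>
#include <linux/mm.h> /* PAGE_SIZE */

static int demo_fatal_flag; /* invented state for the sketch */

static ssize_t demo_fatal_flag_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
    return snprintf(buf, PAGE_SIZE, "%d\n", demo_fatal_flag);
}
static DEVICE_ATTR_RO(demo_fatal_flag); /* expands to dev_attr_demo_fatal_flag */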
+ */ +static ssize_t controller_fatal_error_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + struct Scsi_Host *shost = class_to_shost(cdev); + struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost); + struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; + + return snprintf(buf, PAGE_SIZE, "%d\n", + pm8001_ha->controller_fatal_error); +} +static DEVICE_ATTR_RO(controller_fatal_error); + +/** * pm8001_ctl_fw_version_show - firmware version * @cdev: pointer to embedded class device * @buf: the buffer returned @@ -804,6 +823,7 @@ static DEVICE_ATTR(update_fw, S_IRUGO|S_IWUSR|S_IWGRP, pm8001_show_update_fw, pm8001_store_update_fw); struct device_attribute *pm8001_host_attrs[] = { &dev_attr_interface_rev, + &dev_attr_controller_fatal_error, &dev_attr_fw_version, &dev_attr_update_fw, &dev_attr_aap_log, diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 68a8217032d0..2328ff1349ac 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -1186,7 +1186,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) { s8 bar, logical = 0; - for (bar = 0; bar < 6; bar++) { + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { /* ** logical BARs for SPC: ** bar 0 and 1 - logical BAR0 @@ -1336,10 +1336,13 @@ int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, * @circularQ: the inbound queue we want to transfer to HBA. * @opCode: the operation code represents commands which LLDD and fw recognized. * @payload: the command payload of each operation command. + * @nb: size in bytes of the command payload + * @responseQueue: queue to interrupt on w/ command response (if any) */ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, struct inbound_queue_table *circularQ, - u32 opCode, void *payload, u32 responseQueue) + u32 opCode, void *payload, size_t nb, + u32 responseQueue) { u32 Header = 0, hpriority = 0, bc = 1, category = 0x02; void *pMessage; @@ -1350,10 +1353,13 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, pm8001_printk("No free mpi buffer\n")); return -ENOMEM; } - BUG_ON(!payload); - /*Copy to the payload*/ - memcpy(pMessage, payload, (pm8001_ha->iomb_size - - sizeof(struct mpi_msg_hdr))); + + if (nb > (pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr))) + nb = pm8001_ha->iomb_size - sizeof(struct mpi_msg_hdr); + memcpy(pMessage, payload, nb); + if (nb + sizeof(struct mpi_msg_hdr) < pm8001_ha->iomb_size) + memset(pMessage + nb, 0, pm8001_ha->iomb_size - + (nb + sizeof(struct mpi_msg_hdr))); /*Build the header*/ Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24) @@ -1364,7 +1370,7 @@ int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, /*Update the PI to the firmware*/ pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, circularQ->pi_offset, circularQ->producer_idx); - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("INB Q %x OPCODE:%x , UPDATED PI=%d CI=%d\n", responseQueue, opCode, circularQ->producer_idx, circularQ->consumer_index)); @@ -1436,6 +1442,10 @@ u32 pm8001_mpi_msg_consume(struct pm8001_hba_info *pm8001_ha, /* read header */ header_tmp = pm8001_read_32(msgHeader); msgHeader_tmp = cpu_to_le32(header_tmp); + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( + "outbound opcode msgheader:%x ci=%d pi=%d\n", + msgHeader_tmp, circularQ->consumer_idx, + circularQ->producer_index)); if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) { if (OPC_OUB_SKIP_ENTRY != (le32_to_cpu(msgHeader_tmp) & 
0xfff)) { @@ -1604,7 +1614,8 @@ void pm8001_work_fn(struct work_struct *work) break; default: - pm8001_printk("...query task failed!!!\n"); + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( + "...query task failed!!!\n")); break; }); @@ -1758,7 +1769,8 @@ static void pm8001_send_abort_all(struct pm8001_hba_info *pm8001_ha, task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); task_abort.tag = cpu_to_le32(ccb_tag); - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, + sizeof(task_abort), 0); if (ret) pm8001_tag_free(pm8001_ha, ccb_tag); @@ -1831,7 +1843,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, sata_cmd.ncqtag_atap_dir_m |= ((0x1 << 7) | (0x5 << 9)); memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); - res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, + sizeof(sata_cmd), 0); if (res) { sas_free_task(task); pm8001_tag_free(pm8001_ha, ccb_tag); @@ -1890,6 +1903,11 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_printk("SAS Address of IO Failure Drive:" "%016llx", SAS_ADDR(t->dev->sas_addr))); + if (status) + PM8001_IOERR_DBG(pm8001_ha, pm8001_printk( + "status:0x%x, tag:0x%x, task:0x%p\n", + status, tag, t)); + switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" @@ -2072,7 +2090,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", status)); /* not allowed case. Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; @@ -2125,7 +2143,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("port_id = %x,device_id = %x\n", port_id, dev_id)); switch (event) { @@ -2263,7 +2281,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_printk(" IO_XFER_CMD_FRAME_ISSUED\n")); return; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", event)); /* not allowed case. Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; @@ -2352,6 +2370,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("ts null\n")); return; } + + if (status) + PM8001_IOERR_DBG(pm8001_ha, pm8001_printk( + "status:0x%x, tag:0x%x, task::0x%p\n", + status, tag, t)); + /* Print sas address of IO failed device */ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && (status != IO_UNDERFLOW)) { @@ -2652,7 +2676,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", status)); /* not allowed case. 
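The size argument threaded into pm8001_mpi_build_cmd() in the hunks above changes the copy discipline: only nb payload bytes are copied into the inbound message, clamped to the IOMB body, and the unused tail is zeroed instead of being filled from whatever follows the caller's buffer. A runnable userspace sketch of that clamp, copy and pad sequence with invented sizes:

#include <stdio.h>
#include <string.h>

#define IOMB_SIZE 64 /* invented sizes for the sketch */
#define HDR_SIZE   8

static void build_cmd(unsigned char *body, const void *payload, size_t nb)
{
    size_t room = IOMB_SIZE - HDR_SIZE;

    if (nb > room)
        nb = room;                       /* clamp to the message body */
    memcpy(body, payload, nb);
    if (nb < room)
        memset(body + nb, 0, room - nb); /* zero the unused tail */
}

int main(void)
{
    unsigned char body[IOMB_SIZE - HDR_SIZE];
    const char cmd[] = "ABORT";

    memset(body, 0xff, sizeof(body)); /* stale contents */
    build_cmd(body, cmd, sizeof(cmd));
    printf("byte after payload: %d\n", body[sizeof(cmd)]); /* 0, not 0xff */
    return 0;
}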
Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; @@ -2723,7 +2747,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, pm8001_printk( + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( "port_id:0x%x, device_id:0x%x, tag:0x%x, event:0x%x\n", port_id, dev_id, tag, event)); switch (event) { @@ -2872,7 +2896,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) ts->stat = SAS_OPEN_TO; break; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", event)); /* not allowed case. Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; @@ -2917,9 +2941,13 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) t = ccb->task; ts = &t->task_status; pm8001_dev = ccb->device; - if (status) + if (status) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("smp IO status 0x%x\n", status)); + PM8001_IOERR_DBG(pm8001_ha, + pm8001_printk("status:0x%x, tag:0x%x, task:0x%p\n", + status, tag, t)); + } if (unlikely(!t || !t->lldd_task || !t->dev)) return; @@ -3070,7 +3098,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", status)); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; @@ -3355,7 +3383,8 @@ static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, ((phyId & 0x0F) << 4) | (port_id & 0x0F)); payload.param0 = cpu_to_le32(param0); payload.param1 = cpu_to_le32(param1); - pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); } static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, @@ -3416,7 +3445,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_get_lrate_mode(phy, link_rate); break; default: - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("unknown device type(%x)\n", deviceType)); break; } @@ -3463,7 +3492,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) struct sas_ha_struct *sas_ha = pm8001_ha->sas; struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; unsigned long flags; - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d," " phy id = %d\n", port_id, phy_id)); port->port_state = portstate; @@ -3541,7 +3570,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) break; default: port->port_attached = 0; - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(" phy Down and(default) = %x\n", portstate)); break; @@ -3689,7 +3718,7 @@ int pm8001_mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, pm8001_printk(": FLASH_UPDATE_DISABLED\n")); break; default: - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("No matched status = %d\n", status)); break; } @@ -3805,8 +3834,9 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) struct sas_ha_struct *sas_ha = pm8001_ha->sas; struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; - PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("outbound queue HW event & event type : ")); + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( + "SPC HW event for portid:%d, phyid:%d, event:%x, status:%x\n", + 
port_id, phy_id, eventType, status)); switch (eventType) { case HW_EVENT_PHY_START_STATUS: PM8001_MSG_DBG(pm8001_ha, @@ -3990,7 +4020,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); break; default: - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown event type = %x\n", eventType)); break; } @@ -4161,7 +4191,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n")); break; default: - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n", opc)); break; @@ -4284,7 +4314,7 @@ static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd); rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, - (u32 *)&smp_cmd, 0); + &smp_cmd, sizeof(smp_cmd), 0); if (rc) goto err_out_2; @@ -4352,7 +4382,8 @@ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd, + sizeof(ssp_cmd), 0); return ret; } @@ -4461,7 +4492,8 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, } } - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, + sizeof(sata_cmd), 0); return ret; } @@ -4496,7 +4528,8 @@ pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) memcpy(payload.sas_identify.sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, + sizeof(payload), 0); return ret; } @@ -4518,7 +4551,8 @@ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, + sizeof(payload), 0); return ret; } @@ -4577,7 +4611,8 @@ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); return rc; } @@ -4598,7 +4633,8 @@ int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, payload.device_id = cpu_to_le32(device_id); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister device device_id = %d\n", device_id)); - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); return ret; } @@ -4621,7 +4657,8 @@ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(1); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); return ret; } @@ 
-4649,6 +4686,9 @@ static irqreturn_t pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) { pm8001_chip_interrupt_disable(pm8001_ha, vec); + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( + "irq vec %d, ODMR:0x%x\n", + vec, pm8001_cr32(pm8001_ha, 0, 0x30))); process_oq(pm8001_ha, vec); pm8001_chip_interrupt_enable(pm8001_ha, vec); return IRQ_HANDLED; @@ -4672,7 +4712,8 @@ static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag = cpu_to_le32(cmd_tag); } - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, + sizeof(task_abort), 0); return ret; } @@ -4729,7 +4770,8 @@ int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, if (pm8001_ha->chip_id != chip_8001) sspTMCmd.ds_ads_m = 0x08; circularQ = &pm8001_ha->inbnd_q_tbl[0]; - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd, + sizeof(sspTMCmd), 0); return ret; } @@ -4819,7 +4861,8 @@ int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, + sizeof(nvmd_req), 0); if (rc) { kfree(fw_control_context); pm8001_tag_free(pm8001_ha, tag); @@ -4903,7 +4946,8 @@ int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, default: break; } - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req, + sizeof(nvmd_req), 0); if (rc) { kfree(fw_control_context); pm8001_tag_free(pm8001_ha, tag); @@ -4938,7 +4982,8 @@ pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); payload.sgl_addr_hi = cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); return ret; } @@ -4960,6 +5005,8 @@ pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, if (!fw_control_context) return -ENOMEM; fw_control = (struct fw_control_info *)&ioctl_payload->func_specific; + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( + "dma fw_control context input length :%x\n", fw_control->len)); memcpy(buffer, fw_control->buffer, fw_control->len); flash_update_info.sgl.addr = cpu_to_le64(phys_addr); flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); @@ -5083,7 +5130,8 @@ pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.device_id = cpu_to_le32(pm8001_dev->device_id); payload.nds = cpu_to_le32(state); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); return rc; } @@ -5108,7 +5156,8 @@ pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) payload.SSAHOLT = cpu_to_le32(0xd << 25); payload.sata_hol_tmo = cpu_to_le32(80); payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); if (rc) pm8001_tag_free(pm8001_ha, tag); return rc; diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 
3374f553c617..3c6076e4c6d2 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -41,6 +41,19 @@ #include <linux/slab.h> #include "pm8001_sas.h" #include "pm8001_chips.h" +#include "pm80xx_hwi.h" + +static ulong logging_level = PM8001_FAIL_LOGGING | PM8001_IOERR_LOGGING; +module_param(logging_level, ulong, 0644); +MODULE_PARM_DESC(logging_level, " bits for enabling logging info."); + +static ulong link_rate = LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | LINKRATE_120; +module_param(link_rate, ulong, 0644); +MODULE_PARM_DESC(link_rate, "Enable link rate.\n" + " 1: Link rate 1.5G\n" + " 2: Link rate 3.0G\n" + " 4: Link rate 6.0G\n" + " 8: Link rate 12.0G\n"); static struct scsi_transport_template *pm8001_stt; @@ -88,6 +101,9 @@ static struct scsi_host_template pm8001_sht = { .eh_target_reset_handler = sas_eh_target_reset_handler, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sas_ioctl, +#endif .shost_attrs = pm8001_host_attrs, .track_queue_depth = 1, }; @@ -401,7 +417,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) pdev = pm8001_ha->pdev; /* map pci mem (PMC pci base 0-3)*/ - for (bar = 0; bar < 6; bar++) { + for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { /* ** logical BARs for SPC: ** bar 0 and 1 - logical BAR0 @@ -432,7 +448,7 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) } else { pm8001_ha->io_mem[logicalBar].membase = 0; pm8001_ha->io_mem[logicalBar].memsize = 0; - pm8001_ha->io_mem[logicalBar].memvirtaddr = 0; + pm8001_ha->io_mem[logicalBar].memvirtaddr = NULL; } logicalBar++; } @@ -466,7 +482,15 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev, pm8001_ha->sas = sha; pm8001_ha->shost = shost; pm8001_ha->id = pm8001_id++; - pm8001_ha->logging_level = 0x01; + pm8001_ha->logging_level = logging_level; + if (link_rate >= 1 && link_rate <= 15) + pm8001_ha->link_rate = (link_rate << 8); + else { + pm8001_ha->link_rate = LINKRATE_15 | LINKRATE_30 | + LINKRATE_60 | LINKRATE_120; + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "Setting link rate to default value\n")); + } sprintf(pm8001_ha->name, "%s%d", DRV_NAME, pm8001_ha->id); /* IOMB size is 128 for 8088/89 controllers */ if (pm8001_ha->chip_id != chip_8001) @@ -873,7 +897,6 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) u32 number_of_intr; int flag = 0; int rc; - static char intr_drvname[PM8001_MAX_MSIX_VEC][sizeof(DRV_NAME)+3]; /* SPCv controllers supports 64 msi-x */ if (pm8001_ha->chip_id == chip_8001) { @@ -894,14 +917,16 @@ static u32 pm8001_setup_msix(struct pm8001_hba_info *pm8001_ha) rc, pm8001_ha->number_of_intr)); for (i = 0; i < number_of_intr; i++) { - snprintf(intr_drvname[i], sizeof(intr_drvname[0]), - DRV_NAME"%d", i); + snprintf(pm8001_ha->intr_drvname[i], + sizeof(pm8001_ha->intr_drvname[0]), + "%s-%d", pm8001_ha->name, i); pm8001_ha->irq_vector[i].irq_id = i; pm8001_ha->irq_vector[i].drv_inst = pm8001_ha; rc = request_irq(pci_irq_vector(pm8001_ha->pdev, i), pm8001_interrupt_handler_msix, flag, - intr_drvname[i], &(pm8001_ha->irq_vector[i])); + pm8001_ha->intr_drvname[i], + &(pm8001_ha->irq_vector[i])); if (rc) { for (j = 0; j < i; j++) { free_irq(pci_irq_vector(pm8001_ha->pdev, i), @@ -942,7 +967,7 @@ intx: pm8001_ha->irq_vector[0].irq_id = 0; pm8001_ha->irq_vector[0].drv_inst = pm8001_ha; rc = request_irq(pdev->irq, pm8001_interrupt_handler_intx, IRQF_SHARED, - DRV_NAME, SHOST_TO_SAS_HA(pm8001_ha->shost)); + pm8001_ha->name, SHOST_TO_SAS_HA(pm8001_ha->shost)); 
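The new link_rate module parameter above takes a bitmask (1, 2, 4 and 8 selecting 1.5, 3.0, 6.0 and 12.0 Gb), shifts accepted values into the hardware field, and silently falls back to enabling every rate when the value is out of range. A hedged userspace check of that validation; the encoding below is illustrative:

#include <stdio.h>

/* Illustrative encoding: the driver's LINKRATE_* constants are already
 * the shifted values, and the default enables every rate. */
#define RATE_DEFAULT (0xfu << 8)

static unsigned int pick_link_rate(unsigned long param)
{
    if (param >= 1 && param <= 15)
        return (unsigned int)(param << 8); /* move into the hardware field */
    return RATE_DEFAULT;                   /* out of range: all rates */
}

int main(void)
{
    printf("0x%x\n", pick_link_rate(5)); /* 0x500: 1.5G and 6.0G */
    printf("0x%x\n", pick_link_rate(0)); /* 0xf00: fall back to default */
    return 0;
}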
return rc; } diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 9453705f643a..b7cbc312843e 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -119,7 +119,7 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, mem_virt_alloc = dma_alloc_coherent(&pdev->dev, mem_size + align, &mem_dma_handle, GFP_KERNEL); if (!mem_virt_alloc) { - pm8001_printk("memory allocation error\n"); + pr_err("pm80xx: memory allocation error\n"); return -1; } *pphys_addr = mem_dma_handle; @@ -249,6 +249,8 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, spin_unlock_irqrestore(&pm8001_ha->lock, flags); return 0; default: + PM8001_DEVIO_DBG(pm8001_ha, + pm8001_printk("func 0x%x\n", func)); rc = -EOPNOTSUPP; } msleep(300); @@ -384,8 +386,9 @@ static int pm8001_task_exec(struct sas_task *task, struct pm8001_port *port = NULL; struct sas_task *t = task; struct pm8001_ccb_info *ccb; - u32 tag = 0xdeadbeef, rc, n_elem = 0; + u32 tag = 0xdeadbeef, rc = 0, n_elem = 0; unsigned long flags = 0; + enum sas_protocol task_proto = t->task_proto; if (!dev->port) { struct task_status_struct *tsm = &t->task_status; @@ -410,7 +413,7 @@ static int pm8001_task_exec(struct sas_task *task, pm8001_dev = dev->lldd_dev; port = &pm8001_ha->port[sas_find_local_port_id(dev)]; if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) { - if (sas_protocol_ata(t->task_proto)) { + if (sas_protocol_ata(task_proto)) { struct task_status_struct *ts = &t->task_status; ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; @@ -432,7 +435,7 @@ static int pm8001_task_exec(struct sas_task *task, goto err_out; ccb = &pm8001_ha->ccb_info[tag]; - if (!sas_protocol_ata(t->task_proto)) { + if (!sas_protocol_ata(task_proto)) { if (t->num_scatter) { n_elem = dma_map_sg(pm8001_ha->dev, t->scatter, @@ -452,7 +455,7 @@ static int pm8001_task_exec(struct sas_task *task, ccb->ccb_tag = tag; ccb->task = t; ccb->device = pm8001_dev; - switch (t->task_proto) { + switch (task_proto) { case SAS_PROTOCOL_SMP: rc = pm8001_task_prep_smp(pm8001_ha, ccb); break; @@ -469,8 +472,7 @@ static int pm8001_task_exec(struct sas_task *task, break; default: dev_printk(KERN_ERR, pm8001_ha->dev, - "unknown sas_task proto: 0x%x\n", - t->task_proto); + "unknown sas_task proto: 0x%x\n", task_proto); rc = -EINVAL; break; } @@ -493,7 +495,7 @@ err_out_tag: pm8001_tag_free(pm8001_ha, tag); err_out: dev_printk(KERN_ERR, pm8001_ha->dev, "pm8001 exec failed[%d]!\n", rc); - if (!sas_protocol_ata(t->task_proto)) + if (!sas_protocol_ata(task_proto)) if (n_elem) dma_unmap_sg(pm8001_ha->dev, t->scatter, t->num_scatter, t->data_dir); @@ -1179,7 +1181,7 @@ int pm8001_query_task(struct sas_task *task) break; } } - pm8001_printk(":rc= %d\n", rc); + pr_err("pm80xx: rc= %d\n", rc); return rc; } @@ -1202,8 +1204,8 @@ int pm8001_abort_task(struct sas_task *task) pm8001_dev = dev->lldd_dev; pm8001_ha = pm8001_find_ha_by_dev(dev); phy_id = pm8001_dev->attached_phy; - rc = pm8001_find_tag(task, &tag); - if (rc == 0) { + ret = pm8001_find_tag(task, &tag); + if (ret == 0) { pm8001_printk("no tag for task:%p\n", task); return TMF_RESP_FUNC_FAILED; } @@ -1241,26 +1243,50 @@ int pm8001_abort_task(struct sas_task *task) /* 2. 
Send Phy Control Hard Reset */ reinit_completion(&completion); + phy->port_reset_status = PORT_RESET_TMO; phy->reset_success = false; phy->enable_completion = &completion; phy->reset_completion = &completion_reset; ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id, PHY_HARD_RESET); - if (ret) - goto out; - PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("Waiting for local phy ctl\n")); - wait_for_completion(&completion); - if (!phy->reset_success) + if (ret) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; goto out; + } - /* 3. Wait for Port Reset complete / Port reset TMO */ + /* In the case of the reset timeout/fail we still + * abort the command at the firmware. The assumption + * here is that the drive is off doing something so + * that it's not processing requests, and we want to + * avoid getting a completion for this and either + * leaking the task in libsas or losing the race and + * getting a double free. + */ PM8001_MSG_DBG(pm8001_ha, + pm8001_printk("Waiting for local phy ctl\n")); + ret = wait_for_completion_timeout(&completion, + PM8001_TASK_TIMEOUT * HZ); + if (!ret || !phy->reset_success) { + phy->enable_completion = NULL; + phy->reset_completion = NULL; + } else { + /* 3. Wait for Port Reset complete or + * Port reset TMO + */ + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Waiting for Port reset\n")); - wait_for_completion(&completion_reset); - if (phy->port_reset_status) { - pm8001_dev_gone_notify(dev); - goto out; + ret = wait_for_completion_timeout( + &completion_reset, + PM8001_TASK_TIMEOUT * HZ); + if (!ret) + phy->reset_completion = NULL; + WARN_ON(phy->port_reset_status == + PORT_RESET_TMO); + if (phy->port_reset_status == PORT_RESET_TMO) { + pm8001_dev_gone_notify(dev); + goto out; + } } /* @@ -1308,28 +1334,22 @@ out: int pm8001_abort_task_set(struct domain_device *dev, u8 *lun) { - int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; tmf_task.tmf = TMF_ABORT_TASK_SET; - rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); - return rc; + return pm8001_issue_ssp_tmf(dev, lun, &tmf_task); } int pm8001_clear_aca(struct domain_device *dev, u8 *lun) { - int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; tmf_task.tmf = TMF_CLEAR_ACA; - rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); - - return rc; + return pm8001_issue_ssp_tmf(dev, lun, &tmf_task); } int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) { - int rc = TMF_RESP_FUNC_FAILED; struct pm8001_tmf_task tmf_task; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); @@ -1338,7 +1358,6 @@ int pm8001_clear_task_set(struct domain_device *dev, u8 *lun) pm8001_printk("I_T_L_Q clear task set[%x]\n", pm8001_dev->device_id)); tmf_task.tmf = TMF_CLEAR_TASK_SET; - rc = pm8001_issue_ssp_tmf(dev, lun, &tmf_task); - return rc; + return pm8001_issue_ssp_tmf(dev, lun, &tmf_task); } diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index ff17c6aff63d..93438c8f67da 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -66,8 +66,11 @@ #define PM8001_EH_LOGGING 0x10 /* libsas EH function logging*/ #define PM8001_IOCTL_LOGGING 0x20 /* IOCTL message logging */ #define PM8001_MSG_LOGGING 0x40 /* misc message logging */ -#define pm8001_printk(format, arg...) 
printk(KERN_INFO "pm80xx %s %d:" \ - format, __func__, __LINE__, ## arg) +#define PM8001_DEV_LOGGING 0x80 /* development message logging */ +#define PM8001_DEVIO_LOGGING 0x100 /* development io message logging */ +#define PM8001_IOERR_LOGGING 0x200 /* development io err message logging */ +#define pm8001_printk(format, arg...) pr_info("%s:: %s %d:" \ + format, pm8001_ha->name, __func__, __LINE__, ## arg) #define PM8001_CHECK_LOGGING(HBA, LEVEL, CMD) \ do { \ if (unlikely(HBA->logging_level & LEVEL)) \ @@ -97,6 +100,14 @@ do { \ #define PM8001_MSG_DBG(HBA, CMD) \ PM8001_CHECK_LOGGING(HBA, PM8001_MSG_LOGGING, CMD) +#define PM8001_DEV_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_DEV_LOGGING, CMD) + +#define PM8001_DEVIO_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_DEVIO_LOGGING, CMD) + +#define PM8001_IOERR_DBG(HBA, CMD) \ + PM8001_CHECK_LOGGING(HBA, PM8001_IOERR_LOGGING, CMD) #define PM8001_USE_TASKLET #define PM8001_USE_MSIX @@ -141,6 +152,8 @@ struct pm8001_ioctl_payload { #define MPI_FATAL_EDUMP_TABLE_HANDSHAKE 0x0C /* FDDHSHK */ #define MPI_FATAL_EDUMP_TABLE_STATUS 0x10 /* FDDTSTAT */ #define MPI_FATAL_EDUMP_TABLE_ACCUM_LEN 0x14 /* ACCDDLEN */ +#define MPI_FATAL_EDUMP_TABLE_TOTAL_LEN 0x18 /* TOTALLEN */ +#define MPI_FATAL_EDUMP_TABLE_SIGNATURE 0x1C /* SIGNATURE */ #define MPI_FATAL_EDUMP_HANDSHAKE_RDY 0x1 #define MPI_FATAL_EDUMP_HANDSHAKE_BUSY 0x0 #define MPI_FATAL_EDUMP_TABLE_STAT_RSVD 0x0 @@ -496,6 +509,7 @@ struct pm8001_hba_info { u32 forensic_last_offset; u32 fatal_forensic_shift_offset; u32 forensic_fatal_step; + u32 forensic_preserved_accumulated_transfer; u32 evtlog_ib_offset; u32 evtlog_ob_offset; void __iomem *msg_unit_tbl_addr;/*Message Unit Table Addr*/ @@ -530,11 +544,14 @@ struct pm8001_hba_info { struct pm8001_ccb_info *ccb_info; #ifdef PM8001_USE_MSIX int number_of_intr;/*will be used in remove()*/ + char intr_drvname[PM8001_MAX_MSIX_VEC] + [PM8001_NAME_LENGTH+1+3+1]; #endif #ifdef PM8001_USE_TASKLET struct tasklet_struct tasklet[PM8001_MAX_MSIX_VEC]; #endif u32 logging_level; + u32 link_rate; u32 fw_status; u32 smp_exp_mode; bool controller_fatal_error; @@ -663,7 +680,8 @@ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha); int pm8001_mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, struct inbound_queue_table *circularQ, - u32 opCode, void *payload, u32 responseQueue); + u32 opCode, void *payload, size_t nb, + u32 responseQueue); int pm8001_mpi_msg_free_get(struct inbound_queue_table *circularQ, u16 messageSize, void **messagePtr); u32 pm8001_mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg, diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 73261902d75d..d1d95f1a2c6a 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -75,7 +75,7 @@ void pm80xx_pci_mem_copy(struct pm8001_hba_info *pm8001_ha, u32 soffset, destination1 = (u32 *)destination; for (index = 0; index < dw_count; index += 4, destination1++) { - offset = (soffset + index / 4); + offset = (soffset + index); if (offset < (64 * 1024)) { value = pm8001_cr32(pm8001_ha, bus_base_number, offset); *destination1 = cpu_to_le32(value); @@ -92,9 +92,12 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev, struct pm8001_hba_info *pm8001_ha = sha->lldd_ha; void __iomem *fatal_table_address = pm8001_ha->fatal_tbl_addr; u32 accum_len , reg_val, index, *temp; + u32 status = 1; unsigned long start; u8 *direct_data; char *fatal_error_data = buf; + u32
length_to_read; + u32 offset; pm8001_ha->forensic_info.data_buf.direct_data = buf; if (pm8001_ha->chip_id == chip_8001) { @@ -104,16 +107,35 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev, return (char *)pm8001_ha->forensic_info.data_buf.direct_data - (char *)buf; } + /* initialize variables for very first call from host application */ if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("forensic_info TYPE_NON_FATAL..............\n")); direct_data = (u8 *)fatal_error_data; pm8001_ha->forensic_info.data_type = TYPE_NON_FATAL; pm8001_ha->forensic_info.data_buf.direct_len = SYSFS_OFFSET; + pm8001_ha->forensic_info.data_buf.direct_offset = 0; pm8001_ha->forensic_info.data_buf.read_len = 0; + pm8001_ha->forensic_preserved_accumulated_transfer = 0; - pm8001_ha->forensic_info.data_buf.direct_data = direct_data; + /* Write signature to fatal dump table */ + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_SIGNATURE, 0x1234abcd); + pm8001_ha->forensic_info.data_buf.direct_data = direct_data; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("ossaHwCB: status1 %d\n", status)); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("ossaHwCB: read_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.read_len)); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("ossaHwCB: direct_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_len)); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("ossaHwCB: direct_offset 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_offset)); + } + if (pm8001_ha->forensic_info.data_buf.direct_offset == 0) { /* start to get data */ /* Program the MEMBASE II Shifting Register with 0x00.*/ pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, @@ -126,30 +148,66 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev, /* Read until accum_len is retrieved */ accum_len = pm8001_mr32(fatal_table_address, MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); - PM8001_IO_DBG(pm8001_ha, pm8001_printk("accum_len 0x%x\n", - accum_len)); + /* Determine length of data between previously stored transfer length + * and current accumulated transfer length + */ + length_to_read = + accum_len - pm8001_ha->forensic_preserved_accumulated_transfer; + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv: accum_len 0x%x\n", accum_len)); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv: length_to_read 0x%x\n", + length_to_read)); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv: last_offset 0x%x\n", + pm8001_ha->forensic_last_offset)); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv: read_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.read_len)); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv:: direct_len 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_len)); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv:: direct_offset 0x%x\n", + pm8001_ha->forensic_info.data_buf.direct_offset)); + + /* If accumulated length failed to read correctly, fail the attempt. */ if (accum_len == 0xFFFFFFFF) { PM8001_IO_DBG(pm8001_ha, pm8001_printk("Possible PCI issue 0x%x not expected\n", - accum_len)); - return -EIO; + accum_len)); + return status; } - if (accum_len == 0 || accum_len >= 0x100000) { + /* If accumulated length is zero, fail the attempt */ + if (accum_len == 0) { pm8001_ha->forensic_info.data_buf.direct_data += sprintf(pm8001_ha->forensic_info.data_buf.direct_data, - "%08x ", 0xFFFFFFFF); + "%08x ", 0xFFFFFFFF); return (char *)pm8001_ha->forensic_info.data_buf.direct_data - (char *)buf; } + /* Accumulated length is good so start capturing
the first data */ temp = (u32 *)pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr; if (pm8001_ha->forensic_fatal_step == 0) { moreData: + /* If data to read is less than SYSFS_OFFSET then reduce the + * length of dataLen + */ + if (pm8001_ha->forensic_last_offset + SYSFS_OFFSET + > length_to_read) { + pm8001_ha->forensic_info.data_buf.direct_len = + length_to_read - + pm8001_ha->forensic_last_offset; + } else { + pm8001_ha->forensic_info.data_buf.direct_len = + SYSFS_OFFSET; + } if (pm8001_ha->forensic_info.data_buf.direct_data) { /* Data is in bar, copy to host memory */ - pm80xx_pci_mem_copy(pm8001_ha, pm8001_ha->fatal_bar_loc, - pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr, - pm8001_ha->forensic_info.data_buf.direct_len , - 1); + pm80xx_pci_mem_copy(pm8001_ha, + pm8001_ha->fatal_bar_loc, + pm8001_ha->memoryMap.region[FORENSIC_MEM].virt_ptr, + pm8001_ha->forensic_info.data_buf.direct_len, 1); } pm8001_ha->fatal_bar_loc += pm8001_ha->forensic_info.data_buf.direct_len; @@ -160,21 +218,29 @@ moreData: pm8001_ha->forensic_info.data_buf.read_len = pm8001_ha->forensic_info.data_buf.direct_len; - if (pm8001_ha->forensic_last_offset >= accum_len) { + if (pm8001_ha->forensic_last_offset >= length_to_read) { pm8001_ha->forensic_info.data_buf.direct_data += sprintf(pm8001_ha->forensic_info.data_buf.direct_data, "%08x ", 3); - for (index = 0; index < (SYSFS_OFFSET / 4); index++) { + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4); index++) { pm8001_ha->forensic_info.data_buf.direct_data += - sprintf(pm8001_ha-> - forensic_info.data_buf.direct_data, - "%08x ", *(temp + index)); + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); } pm8001_ha->fatal_bar_loc = 0; pm8001_ha->forensic_fatal_step = 1; pm8001_ha->fatal_forensic_shift_offset = 0; pm8001_ha->forensic_last_offset = 0; + status = 0; + offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv:return1 0x%x\n", offset)); return (char *)pm8001_ha-> forensic_info.data_buf.direct_data - (char *)buf; @@ -184,12 +250,20 @@ moreData: sprintf(pm8001_ha-> forensic_info.data_buf.direct_data, "%08x ", 2); - for (index = 0; index < (SYSFS_OFFSET / 4); index++) { - pm8001_ha->forensic_info.data_buf.direct_data += - sprintf(pm8001_ha-> + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4); index++) { + pm8001_ha->forensic_info.data_buf.direct_data + += sprintf(pm8001_ha-> forensic_info.data_buf.direct_data, "%08x ", *(temp + index)); } + status = 0; + offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv:return2 0x%x\n", offset)); return (char *)pm8001_ha-> forensic_info.data_buf.direct_data - (char *)buf; @@ -199,63 +273,122 @@ moreData: pm8001_ha->forensic_info.data_buf.direct_data += sprintf(pm8001_ha->forensic_info.data_buf.direct_data, "%08x ", 2); - for (index = 0; index < 256; index++) { + for (index = 0; index < + (pm8001_ha->forensic_info.data_buf.direct_len + / 4) ; index++) { pm8001_ha->forensic_info.data_buf.direct_data += sprintf(pm8001_ha-> - forensic_info.data_buf.direct_data, - "%08x ", *(temp + index)); + forensic_info.data_buf.direct_data, + "%08x ", *(temp + index)); } pm8001_ha->fatal_forensic_shift_offset += 0x100; pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, pm8001_ha->fatal_forensic_shift_offset); pm8001_ha->fatal_bar_loc = 0; + status = 0; + 
offset = (int) + ((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv: return3 0x%x\n", offset)); return (char *)pm8001_ha->forensic_info.data_buf.direct_data - (char *)buf; } if (pm8001_ha->forensic_fatal_step == 1) { - pm8001_ha->fatal_forensic_shift_offset = 0; - /* Read 64K of the debug data. */ - pm8001_cw32(pm8001_ha, 0, MEMBASE_II_SHIFT_REGISTER, - pm8001_ha->fatal_forensic_shift_offset); - pm8001_mw32(fatal_table_address, - MPI_FATAL_EDUMP_TABLE_HANDSHAKE, + /* store previous accumulated length before triggering next + * accumulated length update + */ + pm8001_ha->forensic_preserved_accumulated_transfer = + pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); + + /* continue capturing the fatal log until Dump status is 0x3 */ + if (pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS) < + MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) { + + /* reset fddstat bit by writing to zero*/ + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS, 0x0); + + /* set dump control value to '1' so that new data will + * be transferred to shared memory + */ + pm8001_mw32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_HANDSHAKE, MPI_FATAL_EDUMP_HANDSHAKE_RDY); - /* Poll FDDHSHK until clear */ - start = jiffies + (2 * HZ); /* 2 sec */ + /*Poll FDDHSHK until clear */ + start = jiffies + (2 * HZ); /* 2 sec */ - do { - reg_val = pm8001_mr32(fatal_table_address, + do { + reg_val = pm8001_mr32(fatal_table_address, MPI_FATAL_EDUMP_TABLE_HANDSHAKE); - } while ((reg_val) && time_before(jiffies, start)); + } while ((reg_val) && time_before(jiffies, start)); - if (reg_val != 0) { - PM8001_FAIL_DBG(pm8001_ha, - pm8001_printk("TIMEOUT:MEMBASE_II_SHIFT_REGISTER" - " = 0x%x\n", reg_val)); - return -EIO; - } - - /* Read the next 64K of the debug data. 
*/ - pm8001_ha->forensic_fatal_step = 0; - if (pm8001_mr32(fatal_table_address, - MPI_FATAL_EDUMP_TABLE_STATUS) != - MPI_FATAL_EDUMP_TABLE_STAT_NF_SUCCESS_DONE) { - pm8001_mw32(fatal_table_address, - MPI_FATAL_EDUMP_TABLE_HANDSHAKE, 0); - goto moreData; - } else { - pm8001_ha->forensic_info.data_buf.direct_data += - sprintf(pm8001_ha-> - forensic_info.data_buf.direct_data, - "%08x ", 4); - pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF; - pm8001_ha->forensic_info.data_buf.direct_len = 0; - pm8001_ha->forensic_info.data_buf.direct_offset = 0; - pm8001_ha->forensic_info.data_buf.read_len = 0; + if (reg_val != 0) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "TIMEOUT:MPI_FATAL_EDUMP_TABLE_HDSHAKE 0x%x\n", + reg_val)); + /* Fail the dump if a timeout occurs */ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + return((char *) + pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + } + /* Poll status register until set to 2 or + * 3 for up to 2 seconds + */ + start = jiffies + (2 * HZ); /* 2 sec */ + + do { + reg_val = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_STATUS); + } while (((reg_val != 2) && (reg_val != 3)) && + time_before(jiffies, start)); + + if (reg_val < 2) { + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk( + "TIMEOUT:MPI_FATAL_EDUMP_TABLE_STATUS = 0x%x\n", + reg_val)); + /* Fail the dump if a timeout occurs */ + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 0xFFFFFFFF); + pm8001_cw32(pm8001_ha, 0, + MEMBASE_II_SHIFT_REGISTER, + pm8001_ha->fatal_forensic_shift_offset); + } + /* Read the next block of the debug data.*/ + length_to_read = pm8001_mr32(fatal_table_address, + MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) - + pm8001_ha->forensic_preserved_accumulated_transfer; + if (length_to_read != 0x0) { + pm8001_ha->forensic_fatal_step = 0; + goto moreData; + } else { + pm8001_ha->forensic_info.data_buf.direct_data += + sprintf( + pm8001_ha->forensic_info.data_buf.direct_data, + "%08x ", 4); + pm8001_ha->forensic_info.data_buf.read_len + = 0xFFFFFFFF; + pm8001_ha->forensic_info.data_buf.direct_len + = 0; + pm8001_ha->forensic_info.data_buf.direct_offset + = 0; + pm8001_ha->forensic_info.data_buf.read_len = 0; + } } } - + offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data + - (char *)buf); + PM8001_IO_DBG(pm8001_ha, + pm8001_printk("get_fatal_spcv: return4 0x%x\n", offset)); return (char *)pm8001_ha->forensic_info.data_buf.direct_data - (char *)buf; } @@ -317,6 +450,25 @@ static void read_main_config_table(struct pm8001_hba_info *pm8001_ha) pm8001_mr32(address, MAIN_MPI_ILA_RELEASE_TYPE); pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version = pm8001_mr32(address, MAIN_MPI_INACTIVE_FW_VERSION); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "Main cfg table: sign:%x interface rev:%x fw_rev:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.signature, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.interface_rev, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.firmware_rev)); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "table offset: gst:%x iq:%x oq:%x int vec:%x phy attr:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.gst_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inbound_queue_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.outbound_queue_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.int_vec_table_offset, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.phy_attr_table_offset)); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "Main cfg 
table; ila rev:%x Inactive fw rev:%x\n", + pm8001_ha->main_cfg_tbl.pm80xx_tbl.ila_version, + pm8001_ha->main_cfg_tbl.pm80xx_tbl.inc_fw_version)); } /** @@ -521,6 +673,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_mr32(addressib, (offsetib + 0x18)); pm8001_ha->inbnd_q_tbl[i].producer_idx = 0; pm8001_ha->inbnd_q_tbl[i].consumer_index = 0; + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "IQ %d pi_bar 0x%x pi_offset 0x%x\n", i, + pm8001_ha->inbnd_q_tbl[i].pi_pci_bar, + pm8001_ha->inbnd_q_tbl[i].pi_offset)); } for (i = 0; i < PM8001_MAX_SPCV_OUTB_NUM; i++) { pm8001_ha->outbnd_q_tbl[i].element_size_cnt = @@ -549,6 +706,11 @@ static void init_default_table_values(struct pm8001_hba_info *pm8001_ha) pm8001_mr32(addressob, (offsetob + 0x18)); pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0; pm8001_ha->outbnd_q_tbl[i].producer_index = 0; + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "OQ %d ci_bar 0x%x ci_offset 0x%x\n", i, + pm8001_ha->outbnd_q_tbl[i].ci_pci_bar, + pm8001_ha->outbnd_q_tbl[i].ci_offset)); } } @@ -582,6 +744,10 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) ((pm8001_ha->number_of_intr - 1) << 8); pm8001_mw32(address, MAIN_FATAL_ERROR_INTERRUPT, pm8001_ha->main_cfg_tbl.pm80xx_tbl.fatal_err_interrupt); + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "Updated Fatal error interrupt vector 0x%x\n", + pm8001_mr32(address, MAIN_FATAL_ERROR_INTERRUPT))); + pm8001_mw32(address, MAIN_EVENT_CRC_CHECK, pm8001_ha->main_cfg_tbl.pm80xx_tbl.crc_core_dump); @@ -591,6 +757,9 @@ static void update_main_config_table(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping |= 0x20000000; pm8001_mw32(address, MAIN_GPIO_LED_FLAGS_OFFSET, pm8001_ha->main_cfg_tbl.pm80xx_tbl.gpio_led_mapping); + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "Programming DW 0x21 in main cfg table with 0x%x\n", + pm8001_mr32(address, MAIN_GPIO_LED_FLAGS_OFFSET))); pm8001_mw32(address, MAIN_PORT_RECOVERY_TIMER, pm8001_ha->main_cfg_tbl.pm80xx_tbl.port_recovery_timer); @@ -629,6 +798,21 @@ static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr); pm8001_mw32(address, offset + IB_CI_BASE_ADDR_LO_OFFSET, pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "IQ %d: Element pri size 0x%x\n", + number, + pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt)); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "IQ upr base addr 0x%x IQ lwr base addr 0x%x\n", + pm8001_ha->inbnd_q_tbl[number].upper_base_addr, + pm8001_ha->inbnd_q_tbl[number].lower_base_addr)); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "CI upper base addr 0x%x CI lower base addr 0x%x\n", + pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr, + pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr)); } /** @@ -652,6 +836,21 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr); pm8001_mw32(address, offset + OB_INTERRUPT_COALES_OFFSET, pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "OQ %d: Element pri size 0x%x\n", + number, + pm8001_ha->outbnd_q_tbl[number].element_size_cnt)); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "OQ upr base addr 0x%x OQ lwr base addr 0x%x\n", + pm8001_ha->outbnd_q_tbl[number].upper_base_addr, + pm8001_ha->outbnd_q_tbl[number].lower_base_addr)); + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "PI upper base addr 0x%x PI lower base 
addr 0x%x\n", + pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr, + pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr)); } /** @@ -669,9 +868,9 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_UPDATE); /* wait until Inbound DoorBell Clear Register toggled */ if (IS_SPCV_12G(pm8001_ha->pdev)) { - max_wait_count = 4 * 1000 * 1000;/* 4 sec */ + max_wait_count = SPCV_DOORBELL_CLEAR_TIMEOUT; } else { - max_wait_count = 2 * 1000 * 1000;/* 2 sec */ + max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT; } do { udelay(1); @@ -797,7 +996,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */ - PM8001_INIT_DBG(pm8001_ha, + PM8001_DEV_DBG(pm8001_ha, pm8001_printk("Scratchpad 0 Offset: 0x%x value 0x%x\n", offset, value)); pcilogic = (value & 0xFC000000) >> 26; @@ -885,7 +1084,12 @@ pm80xx_set_thermal_config(struct pm8001_hba_info *pm8001_ha) (THERMAL_ENABLE << 8) | page_code; payload.cfg_pg[1] = (LTEMPHIL << 24) | (RTEMPHIL << 8); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "Setting up thermal config. cfg_pg 0 0x%x cfg_pg 1 0x%x\n", + payload.cfg_pg[0], payload.cfg_pg[1])); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); if (rc) pm8001_tag_free(pm8001_ha, tag); return rc; @@ -967,7 +1171,8 @@ pm80xx_set_sas_protocol_timer_config(struct pm8001_hba_info *pm8001_ha) memcpy(&payload.cfg_pg, &SASConfigPage, sizeof(SASProtocolTimerConfig_t)); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); if (rc) pm8001_tag_free(pm8001_ha, tag); @@ -1090,7 +1295,12 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) payload.new_curidx_ksop = ((1 << 24) | (1 << 16) | (1 << 8) | KEK_MGMT_SUBOP_KEYCARDUPDATE); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "Saving Encryption info to flash. 
payload 0x%x\n", + payload.new_curidx_ksop)); + + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); if (rc) pm8001_tag_free(pm8001_ha, tag); @@ -1241,7 +1451,7 @@ pm80xx_chip_soft_rst(struct pm8001_hba_info *pm8001_ha) pm8001_printk("reset register before write : 0x%x\n", regval)); pm8001_cw32(pm8001_ha, 0, SPC_REG_SOFT_RESET, SPCv_NORMAL_RESET_VALUE); - mdelay(500); + msleep(500); regval = pm8001_cr32(pm8001_ha, 0, SPC_REG_SOFT_RESET); PM8001_INIT_DBG(pm8001_ha, @@ -1443,7 +1653,10 @@ static void pm80xx_send_abort_all(struct pm8001_hba_info *pm8001_ha, task_abort.device_id = cpu_to_le32(pm8001_ha_dev->device_id); task_abort.tag = cpu_to_le32(ccb_tag); - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort, + sizeof(task_abort), 0); + PM8001_FAIL_DBG(pm8001_ha, + pm8001_printk("Executing abort task end\n")); if (ret) { sas_free_task(task); pm8001_tag_free(pm8001_ha, ccb_tag); @@ -1519,7 +1732,9 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, sata_cmd.ncqtag_atap_dir_m_dad |= ((0x1 << 7) | (0x5 << 9)); memcpy(&sata_cmd.sata_fis, &fis, sizeof(struct host_to_dev_fis)); - res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, 0); + res = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd, + sizeof(sata_cmd), 0); + PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Executing read log end\n")); if (res) { sas_free_task(task); pm8001_tag_free(pm8001_ha, ccb_tag); @@ -1570,6 +1785,10 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; + + PM8001_DEV_DBG(pm8001_ha, pm8001_printk( + "tag::0x%x, status::0x%x task::0x%p\n", tag, status, t)); + /* Print sas address of IO failed device */ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && (status != IO_UNDERFLOW)) @@ -1772,7 +1991,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", status)); /* not allowed case. Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; @@ -1826,7 +2045,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, + PM8001_IOERR_DBG(pm8001_ha, pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", port_id, tag, event)); switch (event) { @@ -1963,7 +2182,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) ts->stat = SAS_DATA_OVERRUN; break; case IO_XFER_ERROR_INTERNAL_CRC_ERROR: - PM8001_IO_DBG(pm8001_ha, + PM8001_IOERR_DBG(pm8001_ha, pm8001_printk("IO_XFR_ERROR_INTERNAL_CRC_ERROR\n")); /* TBC: used default set values */ ts->resp = SAS_TASK_COMPLETE; @@ -1974,7 +2193,7 @@ static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n")); return; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", event)); /* not allowed case. 
Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; @@ -2062,6 +2281,12 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("ts null\n")); return; } + + if (unlikely(status)) + PM8001_IOERR_DBG(pm8001_ha, pm8001_printk( + "status:0x%x, tag:0x%x, task::0x%p\n", + status, tag, t)); + /* Print sas address of IO failed device */ if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) && (status != IO_UNDERFLOW)) { @@ -2152,7 +2377,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->buf_valid_size = sizeof(*resp); } else PM8001_IO_DBG(pm8001_ha, - pm8001_printk("response to large\n")); + pm8001_printk("response too large\n")); } if (pm8001_dev) pm8001_dev->running_req--; @@ -2365,7 +2590,7 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", status)); /* not allowed case. Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; @@ -2382,6 +2607,8 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("task 0x%p done with io_status 0x%x" " resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat)); + if (t->slow_task) + complete(&t->slow_task->completion); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); @@ -2435,7 +2662,7 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) } ts = &t->task_status; - PM8001_IO_DBG(pm8001_ha, + PM8001_IOERR_DBG(pm8001_ha, pm8001_printk("port_id:0x%x, tag:0x%x, event:0x%x\n", port_id, tag, event)); switch (event) { @@ -2655,6 +2882,9 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) if (unlikely(!t || !t->lldd_task || !t->dev)) return; + PM8001_DEV_DBG(pm8001_ha, + pm8001_printk("tag::0x%x status::0x%x\n", tag, status)); + switch (status) { case IO_SUCCESS: @@ -2822,7 +3052,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; default: - PM8001_IO_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", status)); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; @@ -2873,7 +3103,8 @@ static void pm80xx_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha, ((phyId & 0xFF) << 24) | (port_id & 0xFF)); payload.param0 = cpu_to_le32(param0); payload.param1 = cpu_to_le32(param1); - pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); } static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, @@ -2964,7 +3195,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_get_lrate_mode(phy, link_rate); break; default: - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("unknown device type(%x)\n", deviceType)); break; } @@ -2984,7 +3215,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr); spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags); if (pm8001_ha->flags == PM8001F_RUN_TIME) - mdelay(200);/*delay a moment to wait disk to spinup*/ + msleep(200);/*delay a moment to wait disk to spinup*/ pm8001_bytes_dmaed(pm8001_ha, phy_id); } @@ -3013,7 +3244,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) struct 
sas_ha_struct *sas_ha = pm8001_ha->sas; struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; unsigned long flags; - PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( "port id %d, phy id %d link_rate %d portstate 0x%x\n", port_id, phy_id, link_rate, portstate)); @@ -3101,7 +3332,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) break; default: port->port_attached = 0; - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk(" Phy Down and(default) = 0x%x\n", portstate)); break; @@ -3130,8 +3361,10 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) if (status == 0) { phy->phy_state = PHY_LINK_DOWN; if (pm8001_ha->flags == PM8001F_RUN_TIME && - phy->enable_completion != NULL) + phy->enable_completion != NULL) { complete(phy->enable_completion); + phy->enable_completion = NULL; + } } return 0; @@ -3191,7 +3424,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; struct pm8001_port *port = &pm8001_ha->port[port_id]; struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEV_DBG(pm8001_ha, pm8001_printk("portid:%d phyid:%d event:0x%x status:0x%x\n", port_id, phy_id, eventType, status)); @@ -3376,7 +3609,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); break; default: - PM8001_MSG_DBG(pm8001_ha, + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk("Unknown event type 0x%x\n", eventType)); break; } @@ -3758,7 +3991,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) ssp_coalesced_comp_resp(pm8001_ha, piomb); break; default: - PM8001_MSG_DBG(pm8001_ha, pm8001_printk( + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( "Unknown outbound Queue IOMB OPC = 0x%x\n", opc)); break; } @@ -3991,8 +4224,8 @@ static int pm80xx_chip_smp_req(struct pm8001_hba_info *pm8001_ha, build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd, pm8001_ha->smp_exp_mode, length); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, - (u32 *)&smp_cmd, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &smp_cmd, + sizeof(smp_cmd), 0); if (rc) goto err_out_2; return 0; @@ -4200,7 +4433,7 @@ static int pm80xx_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, } q_index = (u32) (pm8001_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, - &ssp_cmd, q_index); + &ssp_cmd, sizeof(ssp_cmd), q_index); return ret; } @@ -4441,7 +4674,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha, } q_index = (u32) (pm8001_ha_dev->id & 0x00ffffff) % PM8001_MAX_OUTB_NUM; ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, - &sata_cmd, q_index); + &sata_cmd, sizeof(sata_cmd), q_index); return ret; } @@ -4465,23 +4698,9 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) PM8001_INIT_DBG(pm8001_ha, pm8001_printk("PHY START REQ for phy_id %d\n", phy_id)); - /* - ** [0:7] PHY Identifier - ** [8:11] link rate 1.5G, 3G, 6G - ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b Auto mode - ** [14] 0b disable spin up hold; 1b enable spin up hold - ** [15] ob no change in current PHY analig setup 1b enable using SPAST - */ - if (!IS_SPCV_12G(pm8001_ha->pdev)) - payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | - LINKMODE_AUTO | LINKRATE_15 | - LINKRATE_30 | LINKRATE_60 | phy_id); - else - payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | - 
LINKMODE_AUTO | LINKRATE_15 | - LINKRATE_30 | LINKRATE_60 | LINKRATE_120 | - phy_id); + payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | + LINKMODE_AUTO | pm8001_ha->link_rate | phy_id); /* SSC Disable and SAS Analog ST configuration */ /** payload.ase_sh_lm_slr_phyid = @@ -4494,9 +4713,10 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, - &pm8001_ha->phy[phy_id].dev_sas_addr, SAS_ADDR_SIZE); + &pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, + sizeof(payload), 0); return ret; } @@ -4518,7 +4738,8 @@ static int pm80xx_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); - ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, 0); + ret = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload, + sizeof(payload), 0); return ret; } @@ -4584,7 +4805,8 @@ static int pm80xx_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); if (rc) pm8001_tag_free(pm8001_ha, tag); @@ -4614,7 +4836,8 @@ static int pm80xx_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, payload.tag = cpu_to_le32(tag); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xFF) << 8) | (phyId & 0xFF)); - return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + return pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); } static u32 pm80xx_chip_is_our_interrupt(struct pm8001_hba_info *pm8001_ha) @@ -4641,6 +4864,9 @@ static irqreturn_t pm80xx_chip_isr(struct pm8001_hba_info *pm8001_ha, u8 vec) { pm80xx_chip_interrupt_disable(pm8001_ha, vec); + PM8001_DEVIO_DBG(pm8001_ha, pm8001_printk( + "irq vec %d, ODMR:0x%x\n", + vec, pm8001_cr32(pm8001_ha, 0, 0x30))); process_oq(pm8001_ha, vec); pm80xx_chip_interrupt_enable(pm8001_ha, vec); return IRQ_HANDLED; @@ -4669,7 +4895,8 @@ void mpi_set_phy_profile_req(struct pm8001_hba_info *pm8001_ha, payload.reserved[j] = cpu_to_le32(*((u32 *)buf + i)); j++; } - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); if (rc) pm8001_tag_free(pm8001_ha, tag); } @@ -4711,7 +4938,8 @@ void pm8001_set_phy_profile_single(struct pm8001_hba_info *pm8001_ha, for (i = 0; i < length; i++) payload.reserved[i] = cpu_to_le32(*(buf + i)); - rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, 0); + rc = pm8001_mpi_build_cmd(pm8001_ha, circularQ, opc, &payload, + sizeof(payload), 0); if (rc) pm8001_tag_free(pm8001_ha, tag); diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h index dc9ab7689060..701951a0f715 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.h +++ b/drivers/scsi/pm8001/pm80xx_hwi.h @@ -220,6 +220,9 @@ #define SAS_DOPNRJT_RTRY_TMO 128 #define SAS_COPNRJT_RTRY_TMO 128 +#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* 30 sec */ +#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 1000 * 1000) /* 15 sec */ + /* Making ORR bigger than IT NEXUS LOSS 
which is 2000000us = 2 second. Assuming a bigger value 3 second, 3000000/128 = 23437.5 where 128 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 71ff3936da4f..7eb88fe1eb0b 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -3973,9 +3973,7 @@ static const struct file_operations pmcraid_fops = { .open = pmcraid_chr_open, .fasync = pmcraid_chr_fasync, .unlocked_ioctl = pmcraid_chr_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = pmcraid_chr_ioctl, -#endif + .compat_ioctl = compat_ptr_ioctl, .llseek = noop_llseek, }; @@ -5841,7 +5839,7 @@ out_disable_device: } /* - * PCI driver structure of pcmraid driver + * PCI driver structure of pmcraid driver */ static struct pci_driver pmcraid_driver = { .name = PMCRAID_DRIVER_NAME, diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h index 5a021217bfc9..f3f399fe10c8 100644 --- a/drivers/scsi/qedf/qedf.h +++ b/drivers/scsi/qedf/qedf.h @@ -49,6 +49,7 @@ #define QEDF_ABORT_TIMEOUT (10 * 1000) #define QEDF_CLEANUP_TIMEOUT 1 #define QEDF_MAX_CDB_LEN 16 +#define QEDF_LL2_BUF_SIZE 2500 /* Buffer size required for LL2 Rx */ #define UPSTREAM_REMOVE 1 #define UPSTREAM_KEEP 1 diff --git a/drivers/scsi/qedf/qedf_dbg.h b/drivers/scsi/qedf/qedf_dbg.h index d979f095aeda..2386bfb73c46 100644 --- a/drivers/scsi/qedf/qedf_dbg.h +++ b/drivers/scsi/qedf/qedf_dbg.h @@ -42,7 +42,7 @@ extern uint qedf_debug; #define QEDF_LOG_LPORT 0x4000 /* lport logs */ #define QEDF_LOG_ELS 0x8000 /* ELS logs */ #define QEDF_LOG_NPIV 0x10000 /* NPIV logs */ -#define QEDF_LOG_SESS 0x20000 /* Conection setup, cleanup */ +#define QEDF_LOG_SESS 0x20000 /* Connection setup, cleanup */ #define QEDF_LOG_TID 0x80000 /* * FW TID context acquire * free diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c index d905a307302d..b88bed9bb133 100644 --- a/drivers/scsi/qedf/qedf_debugfs.c +++ b/drivers/scsi/qedf/qedf_debugfs.c @@ -47,13 +47,13 @@ qedf_dbg_host_init(struct qedf_dbg_ctx *qedf, * @pf: the pf that is stopping **/ void -qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf) +qedf_dbg_host_exit(struct qedf_dbg_ctx *qedf_dbg) { - QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Destroying debugfs host " + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Destroying debugfs host " "entry\n"); /* remove debugfs entries of this PF */ - debugfs_remove_recursive(qedf->bdf_dentry); - qedf->bdf_dentry = NULL; + debugfs_remove_recursive(qedf_dbg->bdf_dentry); + qedf_dbg->bdf_dentry = NULL; } /** @@ -140,10 +140,10 @@ qedf_dbg_debug_cmd_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { int cnt; - struct qedf_dbg_ctx *qedf = + struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; - QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "entered\n"); + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "debug mask=0x%x\n", qedf_debug); cnt = sprintf(buffer, "debug mask = 0x%x\n", qedf_debug); cnt = min_t(int, count, cnt - *ppos); @@ -158,7 +158,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, uint32_t val; void *kern_buf; int rval; - struct qedf_dbg_ctx *qedf = + struct qedf_dbg_ctx *qedf_dbg = (struct qedf_dbg_ctx *)filp->private_data; if (!count || *ppos) @@ -178,7 +178,7 @@ qedf_dbg_debug_cmd_write(struct file *filp, const char __user *buffer, else qedf_debug = val; - QEDF_INFO(qedf, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val); + QEDF_INFO(qedf_dbg, QEDF_LOG_DEBUGFS, "Setting debug=0x%x.\n", val); return count; } diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c index 5996f68fbf2b..87e169dcebdb 100644 
--- a/drivers/scsi/qedf/qedf_els.c +++ b/drivers/scsi/qedf/qedf_els.c @@ -179,8 +179,11 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg) orig_io_req = cb_arg->aborted_io_req; - if (!orig_io_req) + if (!orig_io_req) { + QEDF_ERR(&qedf->dbg_ctx, + "Original io_req is NULL, rrq_req = %p.\n", rrq_req); goto out_free; + } if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO && rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) @@ -350,8 +353,10 @@ void qedf_restart_rport(struct qedf_rport *fcport) u32 port_id; unsigned long flags; - if (!fcport) + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL.\n"); return; + } spin_lock_irqsave(&fcport->rport_lock, flags); if (test_bit(QEDF_RPORT_IN_RESET, &fcport->flags) || @@ -418,8 +423,11 @@ static void qedf_l2_els_compl(struct qedf_els_cb_arg *cb_arg) * If we are flushing the command just free the cb_arg as none of the * response data will be valid. */ - if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) + if (els_req->event == QEDF_IOREQ_EV_ELS_FLUSH) { + QEDF_ERR(NULL, "els_req xid=0x%x event is flush.\n", + els_req->xid); goto free_arg; + } fcport = els_req->fcport; mp_req = &(els_req->mp_req); @@ -532,8 +540,10 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) orig_io_req = cb_arg->aborted_io_req; - if (!orig_io_req) + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); goto out_free; + } clear_bit(QEDF_CMD_SRR_SENT, &orig_io_req->flags); @@ -547,8 +557,11 @@ static void qedf_srr_compl(struct qedf_els_cb_arg *cb_arg) orig_io_req, orig_io_req->xid, srr_req->xid, refcount); /* If a SRR times out, simply free resources */ - if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) + if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) { + QEDF_ERR(&qedf->dbg_ctx, + "ELS timeout rec_xid=0x%x.\n", srr_req->xid); goto out_put; + } /* Normalize response data into struct fc_frame */ mp_req = &(srr_req->mp_req); @@ -721,8 +734,11 @@ void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf, cb_arg = io_req->cb_arg; /* If we timed out just free resources */ - if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) + if (io_req->event == QEDF_IOREQ_EV_ELS_TMO || !cqe) { + QEDF_ERR(&qedf->dbg_ctx, + "cqe is NULL or timeout event (0x%x)", io_req->event); goto free; + } /* Kill the timer we put on the request */ cancel_delayed_work_sync(&io_req->timeout_work); @@ -825,8 +841,10 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) orig_io_req = cb_arg->aborted_io_req; - if (!orig_io_req) + if (!orig_io_req) { + QEDF_ERR(NULL, "orig_io_req is NULL.\n"); goto out_free; + } if (rec_req->event != QEDF_IOREQ_EV_ELS_TMO && rec_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT) @@ -838,8 +856,12 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg) orig_io_req, orig_io_req->xid, rec_req->xid, refcount); /* If a REC times out, free resources */ - if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) + if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) { + QEDF_ERR(&qedf->dbg_ctx, + "Got TMO event, orig_io_req %p orig_io_xid=0x%x.\n", + orig_io_req, orig_io_req->xid); goto out_put; + } /* Normalize response data into struct fc_frame */ mp_req = &(rec_req->mp_req); diff --git a/drivers/scsi/qedf/qedf_fip.c b/drivers/scsi/qedf/qedf_fip.c index 362d2bed72fb..bb82f0875eca 100644 --- a/drivers/scsi/qedf/qedf_fip.c +++ b/drivers/scsi/qedf/qedf_fip.c @@ -23,8 +23,11 @@ void qedf_fcoe_send_vlan_req(struct qedf_ctx *qedf) int rc = -1; skb = dev_alloc_skb(sizeof(struct fip_vlan)); - if (!skb) + if (!skb) { + QEDF_ERR(&qedf->dbg_ctx, + "Failed to allocate skb.\n"); return; + } eth_fr = (char 
*)skb->data; vlan = (struct fip_vlan *)eth_fr; @@ -250,18 +253,24 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb) fc_wwpn_valid = true; break; case FIP_DT_VN_ID: + fabric_id_valid = false; vp = (struct fip_vn_desc *)desc; - QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, - "vx_port fd_fc_id=%x fd_mac=%pM.\n", - ntoh24(vp->fd_fc_id), vp->fd_mac); - /* Check vx_port fabric ID */ - if (ntoh24(vp->fd_fc_id) != - qedf->lport->port_id) - fabric_id_valid = false; - /* Check vx_port MAC */ - if (!ether_addr_equal(vp->fd_mac, - qedf->data_src_addr)) - fabric_id_valid = false; + + QEDF_ERR(&qedf->dbg_ctx, + "CVL vx_port fd_fc_id=0x%x fd_mac=%pM fd_wwpn=%016llx.\n", + ntoh24(vp->fd_fc_id), vp->fd_mac, + get_unaligned_be64(&vp->fd_wwpn)); + /* Check for vx_port wwpn OR Check vx_port + * fabric ID OR Check vx_port MAC + */ + if ((get_unaligned_be64(&vp->fd_wwpn) == + qedf->wwpn) || + (ntoh24(vp->fd_fc_id) == + qedf->lport->port_id) || + (ether_addr_equal(vp->fd_mac, + qedf->data_src_addr))) { + fabric_id_valid = true; + } break; default: /* Ignore anything else */ diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c index d881e822f92c..e749a2dcaad7 100644 --- a/drivers/scsi/qedf/qedf_io.c +++ b/drivers/scsi/qedf/qedf_io.c @@ -104,6 +104,8 @@ static void qedf_cmd_timeout(struct work_struct *work) qedf_process_seq_cleanup_compl(qedf, NULL, io_req); break; default: + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Hit default case, xid=0x%x.\n", io_req->xid); break; } } @@ -122,8 +124,10 @@ void qedf_cmd_mgr_free(struct qedf_cmd_mgr *cmgr) num_ios = max_xid - min_xid + 1; /* Free fcoe_bdt_ctx structures */ - if (!cmgr->io_bdt_pool) + if (!cmgr->io_bdt_pool) { + QEDF_ERR(&qedf->dbg_ctx, "io_bdt_pool is NULL.\n"); goto free_cmd_pool; + } bd_tbl_sz = QEDF_MAX_BDS_PER_CMD * sizeof(struct scsi_sge); for (i = 0; i < num_ios; i++) { @@ -226,8 +230,11 @@ struct qedf_cmd_mgr *qedf_cmd_mgr_alloc(struct qedf_ctx *qedf) io_req->sense_buffer = dma_alloc_coherent(&qedf->pdev->dev, QEDF_SCSI_SENSE_BUFFERSIZE, &io_req->sense_buffer_dma, GFP_KERNEL); - if (!io_req->sense_buffer) + if (!io_req->sense_buffer) { + QEDF_ERR(&qedf->dbg_ctx, + "Failed to alloc sense buffer.\n"); goto mem_err; + } /* Allocate task parameters to pass to f/w init functions */ io_req->task_params = kzalloc(sizeof(*io_req->task_params), @@ -437,8 +444,12 @@ void qedf_release_cmd(struct kref *ref) struct qedf_rport *fcport = io_req->fcport; unsigned long flags; - if (io_req->cmd_type == QEDF_SCSI_CMD) + if (io_req->cmd_type == QEDF_SCSI_CMD) { + QEDF_WARN(&fcport->qedf->dbg_ctx, + "Cmd release called without scsi_done called, io_req %p xid=0x%x.\n", + io_req, io_req->xid); WARN_ON(io_req->sc_cmd); + } if (io_req->cmd_type == QEDF_ELS || io_req->cmd_type == QEDF_TASK_MGMT_CMD) @@ -447,8 +458,10 @@ void qedf_release_cmd(struct kref *ref) atomic_inc(&cmd_mgr->free_list_cnt); atomic_dec(&fcport->num_active_ios); atomic_set(&io_req->state, QEDF_CMD_ST_INACTIVE); - if (atomic_read(&fcport->num_active_ios) < 0) + if (atomic_read(&fcport->num_active_ios) < 0) { QEDF_WARN(&(fcport->qedf->dbg_ctx), "active_ios < 0.\n"); + WARN_ON(1); + } /* Increment task retry identifier now that the request is released */ io_req->task_retry_identifier++; @@ -951,6 +964,9 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) if (test_bit(QEDF_UNLOADING, &qedf->flags) || test_bit(QEDF_DBG_STOP_IO, &qedf->flags)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "Returning DNC as unloading or stop io, flags 0x%lx.\n", + qedf->flags);
sc_cmd->result = DID_NO_CONNECT << 16; sc_cmd->scsi_done(sc_cmd); return 0; @@ -967,6 +983,9 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) rval = fc_remote_port_chkready(rport); if (rval) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "fc_remote_port_chkready failed=0x%x for port_id=0x%06x.\n", + rval, rport->port_id); sc_cmd->result = rval; sc_cmd->scsi_done(sc_cmd); return 0; @@ -974,12 +993,14 @@ qedf_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc_cmd) /* Retry command if we are doing a qed drain operation */ if (test_bit(QEDF_DRAIN_ACTIVE, &qedf->flags)) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Drain active.\n"); rc = SCSI_MLQUEUE_HOST_BUSY; goto exit_qcmd; } if (lport->state != LPORT_ST_READY || atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "Link down.\n"); rc = SCSI_MLQUEUE_HOST_BUSY; goto exit_qcmd; } @@ -1297,8 +1318,10 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req, struct scsi_cmnd *sc_cmd; int refcount; - if (!io_req) + if (!io_req) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, "io_req is NULL\n"); return; + } if (test_and_set_bit(QEDF_CMD_ERR_SCSI_DONE, &io_req->flags)) { QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, @@ -1414,8 +1437,12 @@ void qedf_process_warning_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, u64 err_warn_bit_map; u8 err_warn = 0xff; - if (!cqe) + if (!cqe) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "cqe is NULL for io_req %p xid=0x%x\n", + io_req, io_req->xid); return; + } QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Warning CQE, " "xid=0x%x\n", io_req->xid); @@ -1477,8 +1504,11 @@ void qedf_process_error_detect(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, { int rval; - if (!cqe) + if (!cqe) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO, + "cqe is NULL for io_req %p\n", io_req); return; + } QEDF_ERR(&(io_req->fcport->qedf->dbg_ctx), "Error detection CQE, " "xid=0x%x\n", io_req->xid); @@ -1543,8 +1573,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun) int wait_cnt = 100; int refcount = 0; - if (!fcport) + if (!fcport) { + QEDF_ERR(NULL, "fcport is NULL\n"); return; + } /* Check that fcport is still offloaded */ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { @@ -1976,6 +2008,10 @@ void qedf_process_abts_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe, clear_bit(QEDF_CMD_IN_ABORT, &io_req->flags); if (io_req->sc_cmd) { + if (!io_req->return_scsi_cmd_on_abts) + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, + "Not calling scsi_done for xid=0x%x.\n", + io_req->xid); if (io_req->return_scsi_cmd_on_abts) qedf_scsi_done(qedf, io_req, DID_ERROR); } @@ -2201,6 +2237,10 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req, } if (io_req->sc_cmd) { + if (!io_req->return_scsi_cmd_on_abts) + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_SCSI_TM, + "Not calling scsi_done for xid=0x%x.\n", + io_req->xid); if (io_req->return_scsi_cmd_on_abts) qedf_scsi_done(qedf, io_req, DID_ERROR); } @@ -2241,7 +2281,7 @@ static int qedf_execute_tmf(struct qedf_rport *fcport, struct scsi_cmnd *sc_cmd, u16 sqe_idx; if (!sc_cmd) { - QEDF_ERR(&(qedf->dbg_ctx), "invalid arg\n"); + QEDF_ERR(&qedf->dbg_ctx, "sc_cmd is NULL\n"); return FAILED; } @@ -2363,8 +2403,8 @@ int qedf_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags) QEDF_ERR(NULL, "tm_flags 0x%x sc_cmd %p op = 0x%02x target_id = 0x%x lun=%d\n", - tm_flags, sc_cmd, sc_cmd->cmnd[0], rport->scsi_target_id, - (int)sc_cmd->device->lun); + tm_flags, sc_cmd, sc_cmd->cmd_len ?
sc_cmd->cmnd[0] : 0xff, + rport->scsi_target_id, (int)sc_cmd->device->lun); if (!rdata || !kref_get_unless_zero(&rdata->kref)) { QEDF_ERR(NULL, "stale rport\n"); @@ -2515,6 +2555,11 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx, fh = (struct fc_frame_header *)fc_frame_header_get(fp); memcpy(fh, (void *)bdq_addr, pktlen); + QEDF_WARN(&qedf->dbg_ctx, + "Processing Unsolicited frame, src=%06x dest=%06x r_ctl=0x%x type=0x%x cmd=%02x\n", + ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id), fh->fh_r_ctl, + fh->fh_type, fc_frame_payload_op(fp)); + /* Initialize the frame so libfc sees it as a valid frame */ crc = fcoe_fc_crc(fp); fc_frame_init(fp); diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index a42babde036d..604856e72cfb 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -27,6 +27,7 @@ const struct qed_fcoe_ops *qed_ops; static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void qedf_remove(struct pci_dev *pdev); +static void qedf_shutdown(struct pci_dev *pdev); /* * Driver module parameters. @@ -110,16 +111,18 @@ static struct kmem_cache *qedf_io_work_cache; void qedf_set_vlan_id(struct qedf_ctx *qedf, int vlan_id) { - qedf->vlan_id = vlan_id; - qedf->vlan_id |= qedf->prio << VLAN_PRIO_SHIFT; - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Setting vlan_id=%04x " - "prio=%d.\n", vlan_id, qedf->prio); + int vlan_id_tmp = 0; + + vlan_id_tmp = vlan_id | (qedf->prio << VLAN_PRIO_SHIFT); + qedf->vlan_id = vlan_id_tmp; + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Setting vlan_id=0x%04x prio=%d.\n", + vlan_id_tmp, qedf->prio); } /* Returns true if we have a valid vlan, false otherwise */ static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) { - int rc; while (qedf->fipvlan_retries--) { /* This is to catch if link goes down during fipvlan retries */ @@ -128,20 +131,25 @@ static bool qedf_initiate_fipvlan_req(struct qedf_ctx *qedf) return false; } - if (qedf->vlan_id > 0) + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, "Driver unloading.\n"); + return false; + } + + if (qedf->vlan_id > 0) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "vlan = 0x%x already set, calling ctlr_link_up.\n", + qedf->vlan_id); + if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) + fcoe_ctlr_link_up(&qedf->ctlr); return true; + } QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Retry %d.\n", qedf->fipvlan_retries); init_completion(&qedf->fipvlan_compl); qedf_fcoe_send_vlan_req(qedf); - rc = wait_for_completion_timeout(&qedf->fipvlan_compl, - 1 * HZ); - if (rc > 0 && - (atomic_read(&qedf->link_state) == QEDF_LINK_UP)) { - fcoe_ctlr_link_up(&qedf->ctlr); - return true; - } + wait_for_completion_timeout(&qedf->fipvlan_compl, 1 * HZ); } return false; @@ -162,6 +170,8 @@ static void qedf_handle_link_update(struct work_struct *work) return; if (atomic_read(&qedf->link_state) != QEDF_LINK_UP) { + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_DISC, + "Link is down, resetting vlan_id.\n"); qedf->vlan_id = 0; return; } @@ -311,8 +321,10 @@ int qedf_send_flogi(struct qedf_ctx *qedf) lport = qedf->lport; - if (!lport->tt.elsct_send) + if (!lport->tt.elsct_send) { + QEDF_ERR(&qedf->dbg_ctx, "tt.elsct_send not set.\n"); return -EINVAL; + } fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi)); if (!fp) { @@ -330,11 +342,6 @@ int qedf_send_flogi(struct qedf_ctx *qedf) return 0; } -struct qedf_tmp_rdata_item { - struct fc_rport_priv *rdata; - struct list_head list; -}; - /* * This function is called if
link_down_tmo is in use. If we get a link up and * link_down_tmo has not expired then use just FLOGI/ADISC to recover our @@ -344,9 +351,8 @@ static void qedf_link_recovery(struct work_struct *work) { struct qedf_ctx *qedf = container_of(work, struct qedf_ctx, link_recovery.work); - struct qedf_rport *fcport; + struct fc_lport *lport = qedf->lport; struct fc_rport_priv *rdata; - struct qedf_tmp_rdata_item *rdata_item, *tmp_rdata_item; bool rc; int retries = 30; int rval, i; @@ -413,33 +419,14 @@ static void qedf_link_recovery(struct work_struct *work) * Call lport->tt.rport_login which will cause libfc to send an * ADISC since the rport is in state ready. */ - rcu_read_lock(); - list_for_each_entry_rcu(fcport, &qedf->fcports, peers) { - rdata = fcport->rdata; - if (rdata == NULL) - continue; - rdata_item = kzalloc(sizeof(struct qedf_tmp_rdata_item), - GFP_ATOMIC); - if (!rdata_item) - continue; + mutex_lock(&lport->disc.disc_mutex); + list_for_each_entry_rcu(rdata, &lport->disc.rports, peers) { if (kref_get_unless_zero(&rdata->kref)) { - rdata_item->rdata = rdata; - list_add(&rdata_item->list, &rdata_login_list); - } else - kfree(rdata_item); - } - rcu_read_unlock(); - /* - * Do the fc_rport_login outside of the rcu lock so we don't take a - * mutex in an atomic context. - */ - list_for_each_entry_safe(rdata_item, tmp_rdata_item, &rdata_login_list, - list) { - list_del(&rdata_item->list); - fc_rport_login(rdata_item->rdata); - kref_put(&rdata_item->rdata->kref, fc_rport_destroy); - kfree(rdata_item); + fc_rport_login(rdata); + kref_put(&rdata->kref, fc_rport_destroy); + } } + mutex_unlock(&lport->disc.disc_mutex); } static void qedf_update_link_speed(struct qedf_ctx *qedf, @@ -467,6 +454,9 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf, case 100000: lport->link_speed = FC_PORTSPEED_100GBIT; break; + case 20000: + lport->link_speed = FC_PORTSPEED_20GBIT; + break; default: lport->link_speed = FC_PORTSPEED_UNKNOWN; break; @@ -476,16 +466,40 @@ static void qedf_update_link_speed(struct qedf_ctx *qedf, * Set supported link speed by querying the supported * capabilities of the link. 
*/ - if (link->supported_caps & SUPPORTED_10000baseKR_Full) + if ((link->supported_caps & QED_LM_10000baseT_Full_BIT) || + (link->supported_caps & QED_LM_10000baseKX4_Full_BIT) || + (link->supported_caps & QED_LM_10000baseR_FEC_BIT) || + (link->supported_caps & QED_LM_10000baseCR_Full_BIT) || + (link->supported_caps & QED_LM_10000baseSR_Full_BIT) || + (link->supported_caps & QED_LM_10000baseLR_Full_BIT) || + (link->supported_caps & QED_LM_10000baseLRM_Full_BIT) || + (link->supported_caps & QED_LM_10000baseKR_Full_BIT)) { lport->link_supported_speeds |= FC_PORTSPEED_10GBIT; - if (link->supported_caps & SUPPORTED_25000baseKR_Full) + } + if ((link->supported_caps & QED_LM_25000baseKR_Full_BIT) || + (link->supported_caps & QED_LM_25000baseCR_Full_BIT) || + (link->supported_caps & QED_LM_25000baseSR_Full_BIT)) { lport->link_supported_speeds |= FC_PORTSPEED_25GBIT; - if (link->supported_caps & SUPPORTED_40000baseLR4_Full) + } + if ((link->supported_caps & QED_LM_40000baseLR4_Full_BIT) || + (link->supported_caps & QED_LM_40000baseKR4_Full_BIT) || + (link->supported_caps & QED_LM_40000baseCR4_Full_BIT) || + (link->supported_caps & QED_LM_40000baseSR4_Full_BIT)) { lport->link_supported_speeds |= FC_PORTSPEED_40GBIT; - if (link->supported_caps & SUPPORTED_50000baseKR2_Full) + } + if ((link->supported_caps & QED_LM_50000baseKR2_Full_BIT) || + (link->supported_caps & QED_LM_50000baseCR2_Full_BIT) || + (link->supported_caps & QED_LM_50000baseSR2_Full_BIT)) { lport->link_supported_speeds |= FC_PORTSPEED_50GBIT; - if (link->supported_caps & SUPPORTED_100000baseKR4_Full) + } + if ((link->supported_caps & QED_LM_100000baseKR4_Full_BIT) || + (link->supported_caps & QED_LM_100000baseSR4_Full_BIT) || + (link->supported_caps & QED_LM_100000baseCR4_Full_BIT) || + (link->supported_caps & QED_LM_100000baseLR4_ER4_Full_BIT)) { lport->link_supported_speeds |= FC_PORTSPEED_100GBIT; + } + if (link->supported_caps & QED_LM_20000baseKR2_Full_BIT) + lport->link_supported_speeds |= FC_PORTSPEED_20GBIT; fc_host_supported_speeds(lport->host) = lport->link_supported_speeds; } @@ -493,6 +507,16 @@ static void qedf_link_update(void *dev, struct qed_link_output *link) { struct qedf_ctx *qedf = (struct qedf_ctx *)dev; + /* + * Prevent race where we're removing the module and we get link update + * for qed. 
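The supported-speed translation above grows from one SUPPORTED_* bit per speed to whole families of QED_LM_* bits. The same decision can be kept table-driven as entries accumulate; here is a sketch with made-up bit values (the real QED_LM_* and FC_PORTSPEED_* constants come from the qed and FC transport headers):

#include <stddef.h>
#include <stdint.h>

enum {  /* illustrative capability bits, not the real QED_LM_* values */
    LM_10G_KR = 1u << 0,
    LM_10G_CR = 1u << 1,
    LM_25G_KR = 1u << 2,
    LM_25G_CR = 1u << 3,
};

enum {  /* illustrative port-speed flags */
    PORTSPEED_10G = 1u << 0,
    PORTSPEED_25G = 1u << 1,
};

static const struct {
    uint32_t caps;   /* any of these capability bits ... */
    uint32_t speed;  /* ... advertises this port speed   */
} speed_map[] = {
    { LM_10G_KR | LM_10G_CR, PORTSPEED_10G },
    { LM_25G_KR | LM_25G_CR, PORTSPEED_25G },
};

static uint32_t caps_to_portspeeds(uint32_t supported_caps)
{
    uint32_t speeds = 0;

    for (size_t i = 0; i < sizeof(speed_map) / sizeof(speed_map[0]); i++)
        if (supported_caps & speed_map[i].caps)
            speeds |= speed_map[i].speed;
    return speeds;
}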
+ */ + if (test_bit(QEDF_UNLOADING, &qedf->flags)) { + QEDF_ERR(&qedf->dbg_ctx, + "Ignore link update, driver getting unloaded.\n"); + return; + } + if (link->link_up) { if (atomic_read(&qedf->link_state) == QEDF_LINK_UP) { QEDF_INFO((&qedf->dbg_ctx), QEDF_LOG_DISC, @@ -572,7 +596,7 @@ static void qedf_dcbx_handler(void *dev, struct qed_dcbx_get *get, u32 mib_type) tmp_prio = get->operational.app_prio.fcoe; if (qedf_default_prio > -1) qedf->prio = qedf_default_prio; - else if (tmp_prio > 7) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "FIP/FCoE prio %d out of range, setting to %d.\n", tmp_prio, QEDF_DEFAULT_PRIO); @@ -1077,7 +1101,7 @@ static int qedf_xmit(struct fc_lport *lport, struct fc_frame *fp) return -ENOMEM; } frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; - cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset; + cp = kmap_atomic(skb_frag_page(frag)) + skb_frag_off(frag); } else { cp = skb_put(skb, tlen); } @@ -1902,6 +1926,13 @@ static int qedf_fcoe_reset(struct Scsi_Host *shost) return 0; } +static void qedf_get_host_port_id(struct Scsi_Host *shost) +{ + struct fc_lport *lport = shost_priv(shost); + + fc_host_port_id(shost) = lport->port_id; +} + static struct fc_host_statistics *qedf_fc_get_host_stats(struct Scsi_Host *shost) { @@ -1972,6 +2003,7 @@ static struct fc_function_template qedf_fc_transport_fn = { .show_host_active_fc4s = 1, .show_host_maxframe_size = 1, + .get_host_port_id = qedf_get_host_port_id, .show_host_port_id = 1, .show_host_supported_speeds = 1, .get_host_speed = fc_get_host_speed, @@ -2340,12 +2372,14 @@ static void qedf_recv_frame(struct qedf_ctx *qedf, fr_dev(fp) = lport; fr_sof(fp) = hp->fcoe_sof; if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { + QEDF_INFO(NULL, QEDF_LOG_LL2, "skb_copy_bits failed.\n"); kfree_skb(skb); return; } fr_eof(fp) = crc_eof.fcoe_eof; fr_crc(fp) = crc_eof.fcoe_crc32; if (pskb_trim(skb, fr_len)) { + QEDF_INFO(NULL, QEDF_LOG_LL2, "pskb_trim failed.\n"); kfree_skb(skb); return; } @@ -2406,9 +2440,9 @@ static void qedf_recv_frame(struct qedf_ctx *qedf, * empty then this is not addressed to our port so simply drop it.
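Both the FIP VLAN retry loop and qedf_link_update() above now bail out once QEDF_UNLOADING is set, so a late qed callback cannot touch a context that is mid-teardown. A stripped-down sketch of that guard, assuming the remove path publishes the flag before freeing anything:

#include <stdatomic.h>
#include <stdbool.h>

struct ctx {
    atomic_bool unloading;    /* stands in for the QEDF_UNLOADING bit */
};

static void driver_remove(struct ctx *c)
{
    /* Publish the flag first so concurrent callbacks observe it. */
    atomic_store(&c->unloading, true);
    /* ... flush outstanding work, then free resources ... */
}

static void link_update_cb(struct ctx *c, bool link_up)
{
    if (atomic_load(&c->unloading))
        return;    /* teardown in progress: ignore the event */
    /* ... normal link-state handling using link_up ... */
    (void)link_up;
}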
*/ if (lport->port_id != ntoh24(fh->fh_d_id) && !vn_port) { - QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2, - "Dropping frame due to destination mismatch: lport->port_id=%x fh->d_id=%x.\n", - lport->port_id, ntoh24(fh->fh_d_id)); + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, + "Dropping frame due to destination mismatch: lport->port_id=0x%x fh->d_id=0x%x.\n", + lport->port_id, ntoh24(fh->fh_d_id)); kfree_skb(skb); return; } @@ -2417,6 +2451,8 @@ static void qedf_recv_frame(struct qedf_ctx *qedf, if ((fh->fh_type == FC_TYPE_BLS) && (f_ctl & FC_FC_SEQ_CTX) && (f_ctl & FC_FC_EX_CTX)) { /* Drop incoming ABTS response that has both SEQ/EX CTX set */ + QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_LL2, + "Dropping ABTS response as both SEQ/EX CTX set.\n"); kfree_skb(skb); return; } @@ -2560,8 +2596,9 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, sizeof(struct status_block_e4), &sb_phys, GFP_KERNEL); if (!sb_virt) { - QEDF_ERR(&(qedf->dbg_ctx), "Status block allocation failed " - "for id = %d.\n", sb_id); + QEDF_ERR(&qedf->dbg_ctx, + "Status block allocation failed for id = %d.\n", + sb_id); return -ENOMEM; } @@ -2569,8 +2606,9 @@ static int qedf_alloc_and_init_sb(struct qedf_ctx *qedf, sb_id, QED_SB_TYPE_STORAGE); if (ret) { - QEDF_ERR(&(qedf->dbg_ctx), "Status block initialization " - "failed for id = %d.\n", sb_id); + QEDF_ERR(&qedf->dbg_ctx, + "Status block initialization failed (0x%x) for id = %d.\n", + ret, sb_id); return ret; } @@ -2654,13 +2692,18 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) io_req = &qedf->cmd_mgr->cmds[xid]; /* Completion not for a valid I/O anymore so just return */ - if (!io_req) + if (!io_req) { + QEDF_ERR(&qedf->dbg_ctx, + "io_req is NULL for xid=0x%x.\n", xid); return; + } fcport = io_req->fcport; if (fcport == NULL) { - QEDF_ERR(&(qedf->dbg_ctx), "fcport is NULL.\n"); + QEDF_ERR(&qedf->dbg_ctx, + "fcport is NULL for xid=0x%x io_req=%p.\n", + xid, io_req); return; } @@ -2669,7 +2712,8 @@ void qedf_process_cqe(struct qedf_ctx *qedf, struct fcoe_cqe *cqe) * isn't valid and shouldn't be taken. We should just return. 
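qedf_process_cqe() above validates each layer of a completion before using it: the xid must map to a command, the command must still own an fcport, and the session must be offloaded. A compact sketch of that layered lookup with stand-in types:

#include <stddef.h>
#include <stdio.h>

struct session;               /* opaque stand-in for the offloaded rport */

struct cmd {
    struct session *sess;     /* NULL once the session is torn down */
};

struct cmd_table {
    struct cmd *cmds;
    size_t ncmds;
};

/* Return the command for a completion, or NULL if anything is stale. */
static struct cmd *completion_lookup(struct cmd_table *t, size_t xid)
{
    if (xid >= t->ncmds) {
        fprintf(stderr, "completion with bad xid=%zu\n", xid);
        return NULL;
    }
    if (!t->cmds[xid].sess) {
        fprintf(stderr, "xid=%zu: session already gone\n", xid);
        return NULL;
    }
    return &t->cmds[xid];
}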
*/ if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) { - QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n"); + QEDF_ERR(&qedf->dbg_ctx, + "Session not offloaded yet, fcport = %p.\n", fcport); return; } @@ -2881,6 +2925,7 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) */ if (!qedf->p_cpuq) { status = 1; + QEDF_ERR(&qedf->dbg_ctx, "p_cpuq is NULL.\n"); goto mem_alloc_failure; } @@ -2896,8 +2941,10 @@ static int qedf_alloc_global_queues(struct qedf_ctx *qedf) /* Allocate DMA coherent buffers for BDQ */ rc = qedf_alloc_bdq(qedf); - if (rc) + if (rc) { + QEDF_ERR(&qedf->dbg_ctx, "Unable to allocate bdq.\n"); goto mem_alloc_failure; + } /* Allocate a CQ and an associated PBL for each MSI-X vector */ for (i = 0; i < qedf->num_queues; i++) { @@ -3107,6 +3154,7 @@ static struct pci_driver qedf_pci_driver = { .id_table = qedf_pci_tbl, .probe = qedf_probe, .remove = qedf_remove, + .shutdown = qedf_shutdown, }; static int __qedf_probe(struct pci_dev *pdev, int mode) @@ -3209,6 +3257,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) qed_params.is_vf = is_vf; qedf->cdev = qed_ops->common->probe(pdev, &qed_params); if (!qedf->cdev) { + QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n"); rc = -ENODEV; goto err1; } @@ -3277,8 +3326,10 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) /* Setup interrupts */ rc = qedf_setup_int(qedf); - if (rc) + if (rc) { + QEDF_ERR(&qedf->dbg_ctx, "Setup interrupts failed.\n"); goto err3; + } rc = qed_ops->start(qedf->cdev, &qedf->tasks); if (rc) { @@ -3360,7 +3411,7 @@ static int __qedf_probe(struct pci_dev *pdev, int mode) } memset(¶ms, 0, sizeof(params)); - params.mtu = 9000; + params.mtu = QEDF_LL2_BUF_SIZE; ether_addr_copy(params.ll2_mac_address, qedf->mac); /* Start LL2 processing thread */ @@ -3719,6 +3770,11 @@ void qedf_get_protocol_tlv_data(void *dev, void *data) fcoe->scsi_tsk_full = qedf->task_set_fulls; } +static void qedf_shutdown(struct pci_dev *pdev) +{ + __qedf_remove(pdev, QEDF_MODE_NORMAL); +} + /* Generic TLV data callback */ void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data) { @@ -3845,7 +3901,7 @@ static void __exit qedf_cleanup(void) } MODULE_LICENSE("GPL"); -MODULE_DESCRIPTION("QLogic QEDF 25/40/50/100Gb FCoE Driver"); +MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx FCoE Module"); MODULE_AUTHOR("QLogic Corporation"); MODULE_VERSION(QEDF_VERSION); module_init(qedf_init); diff --git a/drivers/scsi/qedf/qedf_version.h b/drivers/scsi/qedf/qedf_version.h index e57533de7e96..b0e37afe5bbb 100644 --- a/drivers/scsi/qedf/qedf_version.h +++ b/drivers/scsi/qedf/qedf_version.h @@ -4,9 +4,9 @@ * Copyright (c) 2016-2018 Cavium Inc. 
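The new qedf_shutdown() above routes the PCI shutdown hook through the existing removal path, so reboot and hot-remove share one quiesce sequence instead of maintaining two. The shape of the pattern, reduced to plain C with illustrative names:

#include <stdio.h>

struct device_ctx { const char *name; };

static void device_teardown(struct device_ctx *d)
{
    printf("%s: stop I/O, drain queues, release firmware\n", d->name);
}

/* .remove and .shutdown both funnel into the same teardown. */
static void device_remove(struct device_ctx *d)   { device_teardown(d); }
static void device_shutdown(struct device_ctx *d) { device_teardown(d); }

int main(void)
{
    struct device_ctx d = { "example-hba" };

    device_remove(&d);      /* hot-unplug path */
    device_shutdown(&d);    /* reboot path: same quiesce sequence */
    return 0;
}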
*/ -#define QEDF_VERSION "8.37.25.20" +#define QEDF_VERSION "8.42.3.0" #define QEDF_DRIVER_MAJOR_VER 8 -#define QEDF_DRIVER_MINOR_VER 37 -#define QEDF_DRIVER_REV_VER 25 -#define QEDF_DRIVER_ENG_VER 20 +#define QEDF_DRIVER_MINOR_VER 42 +#define QEDF_DRIVER_REV_VER 3 +#define QEDF_DRIVER_ENG_VER 0 diff --git a/drivers/scsi/qedi/qedi_dbg.h b/drivers/scsi/qedi/qedi_dbg.h index 243acc8b520a..37d084086fd4 100644 --- a/drivers/scsi/qedi/qedi_dbg.h +++ b/drivers/scsi/qedi/qedi_dbg.h @@ -44,7 +44,7 @@ extern uint qedi_dbg_log; #define QEDI_LOG_LPORT 0x4000 /* lport logs */ #define QEDI_LOG_ELS 0x8000 /* ELS logs */ #define QEDI_LOG_NPIV 0x10000 /* NPIV logs */ -#define QEDI_LOG_SESS 0x20000 /* Conection setup, cleanup */ +#define QEDI_LOG_SESS 0x20000 /* Connection setup, cleanup */ #define QEDI_LOG_UIO 0x40000 /* iSCSI UIO logs */ #define QEDI_LOG_TID 0x80000 /* FW TID context acquire, * free diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index e5760c4a27f0..3337cd341d21 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -357,10 +357,6 @@ #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) -#include <asm/sn/io.h> -#endif - /* * Compile time Options: @@ -380,11 +376,6 @@ #define NVRAM_DELAY() udelay(500) /* 2 microseconds */ -#if defined(__ia64__) && !defined(ia64_platform_is) -#define ia64_platform_is(foo) (!strcmp(x, platform_name)) -#endif - - #define IS_ISP1040(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020) #define IS_ISP1x40(ha) (ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1020 || \ ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP1240) @@ -1427,15 +1418,6 @@ qla1280_initialize_adapter(struct scsi_qla_host *ha) ha->flags.reset_active = 0; ha->flags.abort_isp_active = 0; -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) - if (ia64_platform_is("sn2")) { - printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " - "dual channel lockup workaround\n", ha->host_no); - ha->flags.use_pci_vchannel = 1; - driver_setup.no_nvram = 1; - } -#endif - /* TODO: implement support for the 1040 nvram format */ if (IS_ISP1040(ha)) driver_setup.no_nvram = 1; @@ -1717,6 +1699,16 @@ qla1280_load_firmware_pio(struct scsi_qla_host *ha) return err; } +#ifdef QLA_64BIT_PTR +#define LOAD_CMD MBC_LOAD_RAM_A64_ROM +#define DUMP_CMD MBC_DUMP_RAM_A64_ROM +#define CMD_ARGS (BIT_7 | BIT_6 | BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0) +#else +#define LOAD_CMD MBC_LOAD_RAM +#define DUMP_CMD MBC_DUMP_RAM +#define CMD_ARGS (BIT_4 | BIT_3 | BIT_2 | BIT_1 | BIT_0) +#endif + #define DUMP_IT_BACK 0 /* for debug of RISC loading */ static int qla1280_load_firmware_dma(struct scsi_qla_host *ha) @@ -1766,7 +1758,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) for(i = 0; i < cnt; i++) ((__le16 *)ha->request_ring)[i] = fw_data[i]; - mb[0] = MBC_LOAD_RAM; + mb[0] = LOAD_CMD; mb[1] = risc_address; mb[4] = cnt; mb[3] = ha->request_dma & 0xffff; @@ -1777,8 +1769,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) __func__, mb[0], (void *)(long)ha->request_dma, mb[6], mb[7], mb[2], mb[3]); - err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | - BIT_1 | BIT_0, mb); + err = qla1280_mailbox_command(ha, CMD_ARGS, mb); if (err) { printk(KERN_ERR "scsi(%li): Failed to load partial " "segment of f\n", ha->host_no); @@ -1786,7 +1777,7 @@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) } #if DUMP_IT_BACK - mb[0] = MBC_DUMP_RAM; + mb[0] = DUMP_CMD; mb[1] = risc_address; mb[4] = cnt; mb[3] = p_tbuf & 0xffff; @@ -1794,8 +1785,7 
@@ qla1280_load_firmware_dma(struct scsi_qla_host *ha) mb[7] = upper_32_bits(p_tbuf) & 0xffff; mb[6] = upper_32_bits(p_tbuf) >> 16; - err = qla1280_mailbox_command(ha, BIT_4 | BIT_3 | BIT_2 | - BIT_1 | BIT_0, mb); + err = qla1280_mailbox_command(ha, CMD_ARGS, mb); if (err) { printk(KERN_ERR "Failed to dump partial segment of f/w\n"); @@ -2251,13 +2241,6 @@ qla1280_nvram_config(struct scsi_qla_host *ha) mb[1] = nv->firmware_feature.f.enable_fast_posting; mb[1] |= nv->firmware_feature.f.report_lvd_bus_transition << 1; mb[1] |= nv->firmware_feature.f.disable_synchronous_backoff << 5; -#if defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_SGI_SN2) - if (ia64_platform_is("sn2")) { - printk(KERN_INFO "scsi(%li): Enabling SN2 PCI DMA " - "workaround\n", ha->host_no); - mb[1] |= nv->firmware_feature.f.unused_9 << 9; /* XXX */ - } -#endif status |= qla1280_mailbox_command(ha, BIT_1 | BIT_0, mb); /* Retry count and delay. */ @@ -2888,12 +2871,6 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) break; dma_handle = sg_dma_address(s); -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) - if (ha->flags.use_pci_vchannel) - sn_pci_set_vchan(ha->pdev, - (unsigned long *)&dma_handle, - SCSI_BUS_32(cmd)); -#endif *dword_ptr++ = cpu_to_le32(lower_32_bits(dma_handle)); *dword_ptr++ = @@ -2950,12 +2927,6 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp) if (cnt == 5) break; dma_handle = sg_dma_address(s); -#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) - if (ha->flags.use_pci_vchannel) - sn_pci_set_vchan(ha->pdev, - (unsigned long *)&dma_handle, - SCSI_BUS_32(cmd)); -#endif *dword_ptr++ = cpu_to_le32(lower_32_bits(dma_handle)); *dword_ptr++ = diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index b496206362a9..e7820b5bca38 100644 --- a/drivers/scsi/qla1280.h +++ b/drivers/scsi/qla1280.h @@ -277,6 +277,8 @@ struct device_reg { #define MBC_MAILBOX_REGISTER_TEST 6 /* Wrap incoming mailboxes */ #define MBC_VERIFY_CHECKSUM 7 /* Verify checksum */ #define MBC_ABOUT_FIRMWARE 8 /* Get firmware revision */ +#define MBC_LOAD_RAM_A64_ROM 9 /* Load RAM 64bit ROM version */ +#define MBC_DUMP_RAM_A64_ROM 0x0a /* Dump RAM 64bit ROM version */ #define MBC_INIT_REQUEST_QUEUE 0x10 /* Initialize request queue */ #define MBC_INIT_RESPONSE_QUEUE 0x11 /* Initialize response queue */ #define MBC_EXECUTE_IOCB 0x12 /* Execute IOCB command */ @@ -1055,9 +1057,6 @@ struct scsi_qla_host { uint32_t reset_active:1; /* 3 */ uint32_t abort_isp_active:1; /* 4 */ uint32_t disable_risc_code_load:1; /* 5 */ -#ifdef __ia64__ - uint32_t use_pci_vchannel:1; -#endif } flags; struct nvram nvram; diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 8d560c562e9c..d7e7043f9eab 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -102,8 +102,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, qla8044_idc_lock(ha); qla82xx_set_reset_owner(vha); qla8044_idc_unlock(ha); - } else + } else { + ha->fw_dump_mpi = 1; qla2x00_system_error(vha); + } break; case 4: if (IS_P3P_TYPE(ha)) { @@ -176,6 +178,7 @@ qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj, faddr = ha->flt_region_nvram; if (IS_QLA28XX(ha)) { + qla28xx_get_aux_images(vha, &active_regions); if (active_regions.aux.vpd_nvram == QLA27XX_SECONDARY_IMAGE) faddr = ha->flt_region_nvram_sec; } @@ -382,7 +385,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_region_size = size; 
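qla1280_load_firmware_dma() above now selects its mailbox opcode and argument mask once, at preprocessing time, through the QLA_64BIT_PTR block, instead of repeating the BIT_* list at every call site. A sketch of that compile-time selection with made-up opcode values:

#include <stdio.h>

#define CMD_LOAD_RAM      0x04    /* illustrative opcodes, not the real MBC_* */
#define CMD_LOAD_RAM_A64  0x09

#ifdef USE_64BIT_DMA              /* stands in for QLA_64BIT_PTR */
#define LOAD_CMD  CMD_LOAD_RAM_A64
#define CMD_ARGS  0xDF            /* extra mailboxes carry the high address */
#else
#define LOAD_CMD  CMD_LOAD_RAM
#define CMD_ARGS  0x1F
#endif

int main(void)
{
    printf("load opcode 0x%02x, mailbox arg mask 0x%02x\n",
           (unsigned)LOAD_CMD, (unsigned)CMD_ARGS);
    return 0;
}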
ha->optrom_state = QLA_SREADING; - ha->optrom_buffer = vmalloc(ha->optrom_region_size); + ha->optrom_buffer = vzalloc(ha->optrom_region_size); if (ha->optrom_buffer == NULL) { ql_log(ql_log_warn, vha, 0x7062, "Unable to allocate memory for optrom retrieval " @@ -404,7 +407,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "Reading flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); - memset(ha->optrom_buffer, 0, ha->optrom_region_size); ha->isp_ops->read_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); break; @@ -441,9 +443,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, valid = 0; if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0) valid = 1; - else if (start == (ha->flt_region_boot * 4) || - start == (ha->flt_region_fw * 4)) - valid = 1; else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)) valid = 1; if (!valid) { @@ -457,7 +456,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_region_size = size; ha->optrom_state = QLA_SWRITING; - ha->optrom_buffer = vmalloc(ha->optrom_region_size); + ha->optrom_buffer = vzalloc(ha->optrom_region_size); if (ha->optrom_buffer == NULL) { ql_log(ql_log_warn, vha, 0x7066, "Unable to allocate memory for optrom update " @@ -472,7 +471,6 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "Staging flash region write -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); - memset(ha->optrom_buffer, 0, ha->optrom_region_size); break; case 3: if (ha->optrom_state != QLA_SWRITING) { @@ -491,8 +489,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "Writing flash region -- 0x%x/0x%x.\n", ha->optrom_region_start, ha->optrom_region_size); - ha->isp_ops->write_optrom(vha, ha->optrom_buffer, + rval = ha->isp_ops->write_optrom(vha, ha->optrom_buffer, ha->optrom_region_start, ha->optrom_region_size); + if (rval) + rval = -EIO; break; default: rval = -EINVAL; @@ -726,7 +726,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, break; } else { /* Make sure FC side is not in reset */ - qla2x00_wait_for_hba_online(vha); + WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) != + QLA_SUCCESS); /* Issue MPI reset */ scsi_block_requests(vha->host); @@ -1126,7 +1127,8 @@ qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr, char pci_info[30]; return scnprintf(buf, PAGE_SIZE, "%s\n", - vha->hw->isp_ops->pci_info_str(vha, pci_info)); + vha->hw->isp_ops->pci_info_str(vha, pci_info, + sizeof(pci_info))); } static ssize_t @@ -2920,6 +2922,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) struct qla_hw_data *ha = vha->hw; uint16_t id = vha->vp_idx; + set_bit(VPORT_DELETE, &vha->dpc_flags); + while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) || test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) msleep(1000); @@ -2956,6 +2960,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport) dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); + vha->gnl.l = NULL; + vfree(vha->scan.l); if (vha->qpair && vha->qpair->vp_idx == vha->vp_idx) { diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 5441557b424b..d7169e43f5e1 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -12,10 +12,8 @@ #include <linux/bsg-lib.h> /* BSG support for ELS/CT pass through */ -void -qla2x00_bsg_job_done(void *ptr, int res) +void qla2x00_bsg_job_done(srb_t *sp, int res) { - srb_t *sp = ptr; 
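The optrom paths above replace vmalloc() followed by a separate memset() with vzalloc(), which returns zeroed memory in one step and closes the window where the buffer could be read uninitialized. The userspace analogue is swapping malloc()+memset() for calloc():

#include <stdlib.h>
#include <string.h>

/* Before: allocate, then remember to zero. */
static void *region_buf_old(size_t size)
{
    void *p = malloc(size);

    if (p)
        memset(p, 0, size);
    return p;
}

/* After: allocation and zeroing in one call, like vzalloc(). */
static void *region_buf_new(size_t size)
{
    return calloc(1, size);
}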
struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_reply *bsg_reply = bsg_job->reply; @@ -25,10 +23,8 @@ qla2x00_bsg_job_done(void *ptr, int res) sp->free(sp); } -void -qla2x00_bsg_sp_free(void *ptr) +void qla2x00_bsg_sp_free(srb_t *sp) { - srb_t *sp = ptr; struct qla_hw_data *ha = sp->vha->hw; struct bsg_job *bsg_job = sp->u.bsg_job; struct fc_bsg_request *bsg_request = bsg_job->request; @@ -58,7 +54,8 @@ qla2x00_bsg_sp_free(void *ptr) if (sp->type == SRB_CT_CMD || sp->type == SRB_FXIOCB_BCMD || sp->type == SRB_ELS_CMD_HST) - kfree(sp->fcport); + qla2x00_free_fcport(sp->fcport); + qla2x00_rel_sp(sp); } @@ -257,7 +254,7 @@ qla2x00_process_els(struct bsg_job *bsg_job) srb_t *sp; const char *type; int req_sg_cnt, rsp_sg_cnt; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); uint16_t nextlid = 0; if (bsg_request->msgcode == FC_BSG_RPT_ELS) { @@ -341,6 +338,8 @@ qla2x00_process_els(struct bsg_job *bsg_job) dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); if (!req_sg_cnt) { + dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); rval = -ENOMEM; goto done_free_fcport; } @@ -348,6 +347,8 @@ qla2x00_process_els(struct bsg_job *bsg_job) rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); if (!rsp_sg_cnt) { + dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE); rval = -ENOMEM; goto done_free_fcport; } @@ -405,7 +406,7 @@ done_unmap_sg: done_free_fcport: if (bsg_request->msgcode == FC_BSG_RPT_ELS) - kfree(fcport); + qla2x00_free_fcport(fcport); done: return rval; } @@ -432,7 +433,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); int req_sg_cnt, rsp_sg_cnt; uint16_t loop_id; struct fc_port *fcport; @@ -545,7 +546,7 @@ qla2x00_process_ct(struct bsg_job *bsg_job) return rval; done_free_fcport: - kfree(fcport); + qla2x00_free_fcport(fcport); done_unmap_sg: dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE); @@ -796,7 +797,7 @@ qla2x00_process_loopback(struct bsg_job *bsg_job) if (atomic_read(&vha->loop_state) == LOOP_READY && (ha->current_topology == ISP_CFG_F || - (le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && + (get_unaligned_le32(req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && elreq.options == EXTERNAL_LOOPBACK) { type = "FC_BSG_HST_VENDOR_ECHO_DIAG"; @@ -1778,8 +1779,8 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) uint16_t nextlid = 0; uint32_t tot_dsds; srb_t *sp = NULL; - uint32_t req_data_len = 0; - uint32_t rsp_data_len = 0; + uint32_t req_data_len; + uint32_t rsp_data_len; /* Check the type of the adapter */ if (!IS_BIDI_CAPABLE(ha)) { @@ -1884,6 +1885,9 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) goto done_unmap_sg; } + req_data_len = bsg_job->request_payload.payload_len; + rsp_data_len = bsg_job->reply_payload.payload_len; + if (req_data_len != rsp_data_len) { rval = EXT_STATUS_BUSY; ql_log(ql_log_warn, vha, 0x70aa, @@ -1891,10 +1895,6 @@ qla24xx_process_bidir_cmd(struct bsg_job *bsg_job) goto done_unmap_sg; } - req_data_len = bsg_job->request_payload.payload_len; - rsp_data_len = bsg_job->reply_payload.payload_len; - - /* 
Alloc SRB structure */ sp = qla2x00_get_sp(vha, &(vha->bidir_fcport), GFP_KERNEL); if (!sp) { @@ -1951,7 +1951,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job) struct Scsi_Host *host = fc_bsg_to_shost(bsg_job); scsi_qla_host_t *vha = shost_priv(host); struct qla_hw_data *ha = vha->hw; - int rval = (DRIVER_ERROR << 16); + int rval = (DID_ERROR << 16); struct qla_mt_iocb_rqst_fx00 *piocb_rqst; srb_t *sp; int req_sg_cnt = 0, rsp_sg_cnt = 0; @@ -2050,7 +2050,7 @@ qlafx00_mgmt_cmd(struct bsg_job *bsg_job) return rval; done_free_fcport: - kfree(fcport); + qla2x00_free_fcport(fcport); done_unmap_rsp_sg: if (piocb_rqst->flags & SRB_FXDISC_RESP_DMA_VALID) @@ -2400,7 +2400,7 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job) struct qla_active_regions regions = { }; struct active_regions active_regions = { }; - qla28xx_get_aux_images(vha, &active_regions); + qla27xx_get_active_image(vha, &active_regions); regions.global_image = active_regions.global; if (IS_QLA28XX(ha)) { diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index 9e80646722e2..88a56e8480f7 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -18,7 +18,7 @@ * | Device Discovery | 0x2134 | 0x210e-0x2116 | * | | | 0x211a | * | | | 0x211c-0x2128 | - * | | | 0x212a-0x2130 | + * | | | 0x212a-0x2134 | * | Queue Command and IO tracing | 0x3074 | 0x300b | * | | | 0x3027-0x3028 | * | | | 0x303d-0x3041 | @@ -2519,12 +2519,6 @@ qla83xx_fw_dump_failed: /* Driver Debug Functions. */ /****************************************************************************/ -static inline int -ql_mask_match(uint level) -{ - return (level & ql2xextended_error_logging) == level; -} - /* * This function is for formatting and logging debug information. * It is to be used when vha is available. It formats the message @@ -2743,7 +2737,8 @@ ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id) void -ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, void *buf, uint size) +ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf, + uint size) { uint cnt; diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h index bb01b680ce9f..433e95502808 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.h +++ b/drivers/scsi/qla2xxx/qla_dbg.h @@ -374,3 +374,9 @@ extern int qla24xx_dump_ram(struct qla_hw_data *, uint32_t, uint32_t *, extern void qla24xx_pause_risc(struct device_reg_24xx __iomem *, struct qla_hw_data *); extern int qla24xx_soft_reset(struct qla_hw_data *); + +static inline int +ql_mask_match(uint level) +{ + return (level & ql2xextended_error_logging) == level; +} diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index bad2b12604f1..ed32e9715794 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -34,6 +34,20 @@ #include <scsi/scsi_transport_fc.h> #include <scsi/scsi_bsg_fc.h> +/* Big endian Fibre Channel S_ID (source ID) or D_ID (destination ID). */ +typedef struct { + uint8_t domain; + uint8_t area; + uint8_t al_pa; +} be_id_t; + +/* Little endian Fibre Channel S_ID (source ID) or D_ID (destination ID). 
*/ +typedef struct { + uint8_t al_pa; + uint8_t area; + uint8_t domain; +} le_id_t; + #include "qla_bsg.h" #include "qla_dsd.h" #include "qla_nx.h" @@ -117,9 +131,9 @@ #define RD_REG_BYTE_RELAXED(addr) readb_relaxed(addr) #define RD_REG_WORD_RELAXED(addr) readw_relaxed(addr) #define RD_REG_DWORD_RELAXED(addr) readl_relaxed(addr) -#define WRT_REG_BYTE(addr, data) writeb(data,addr) -#define WRT_REG_WORD(addr, data) writew(data,addr) -#define WRT_REG_DWORD(addr, data) writel(data,addr) +#define WRT_REG_BYTE(addr, data) writeb(data, addr) +#define WRT_REG_WORD(addr, data) writew(data, addr) +#define WRT_REG_DWORD(addr, data) writel(data, addr) /* * ISP83XX specific remote register addresses @@ -207,7 +221,7 @@ * 133Mhz slot. */ #define RD_REG_WORD_PIO(addr) (inw((unsigned long)addr)) -#define WRT_REG_WORD_PIO(addr, data) (outw(data,(unsigned long)addr)) +#define WRT_REG_WORD_PIO(addr, data) (outw(data, (unsigned long)addr)) /* * Fibre Channel device definitions. @@ -303,7 +317,8 @@ struct srb_cmd { uint32_t request_sense_length; uint32_t fw_sense_length; uint8_t *request_sense_ptr; - void *ctx; + struct ct6_dsd *ct6_ctx; + struct crc_context *crc_ctx; }; /* @@ -343,6 +358,51 @@ typedef union { } port_id_t; #define INVALID_PORT_ID 0xFFFFFF +static inline le_id_t be_id_to_le(be_id_t id) +{ + le_id_t res; + + res.domain = id.domain; + res.area = id.area; + res.al_pa = id.al_pa; + + return res; +} + +static inline be_id_t le_id_to_be(le_id_t id) +{ + be_id_t res; + + res.domain = id.domain; + res.area = id.area; + res.al_pa = id.al_pa; + + return res; +} + +static inline port_id_t be_to_port_id(be_id_t id) +{ + port_id_t res; + + res.b.domain = id.domain; + res.b.area = id.area; + res.b.al_pa = id.al_pa; + res.b.rsvd_1 = 0; + + return res; +} + +static inline be_id_t port_id_to_be_id(port_id_t port_id) +{ + be_id_t res; + + res.domain = port_id.b.domain; + res.area = port_id.b.area; + res.al_pa = port_id.b.al_pa; + + return res; +} + struct els_logo_payload { uint8_t opcode; uint8_t rsvd[3]; @@ -395,7 +455,7 @@ struct srb_iocb { struct els_logo_payload *els_logo_pyld; dma_addr_t els_logo_pyld_dma; } els_logo; - struct { + struct els_plogi { #define ELS_DCMD_PLOGI 0x3 uint32_t flags; uint32_t els_cmd; @@ -531,18 +591,23 @@ typedef struct srb { */ uint8_t cmd_type; uint8_t pad[3]; - atomic_t ref_count; struct kref cmd_kref; /* need to migrate ref_count over to this */ void *priv; wait_queue_head_t nvme_ls_waitq; struct fc_port *fcport; struct scsi_qla_host *vha; + unsigned int start_timer:1; + unsigned int abort:1; + unsigned int aborted:1; + unsigned int completed:1; + uint32_t handle; uint16_t flags; uint16_t type; const char *name; int iocbs; struct qla_qpair *qpair; + struct srb *cmd_sp; struct list_head elem; u32 gen1; /* scratch */ u32 gen2; /* scratch */ @@ -554,14 +619,22 @@ typedef struct srb { struct bsg_job *bsg_job; struct srb_cmd scmd; } u; - void (*done)(void *, int); - void (*free)(void *); + /* + * Report completion status @res and call sp_put(@sp). @res is + * an NVMe status code, a SCSI result (e.g. DID_OK << 16) or a + * QLA_* status value. + */ + void (*done)(struct srb *sp, int res); + /* Stop the timer and free @sp. Only used by the FCP code. */ + void (*free)(struct srb *sp); + /* + * Call nvme_private->fd->done() and free @sp. Only used by the NVMe + * code. 
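The be_id_t/le_id_t structs and conversion helpers above make the byte order of a Fibre Channel port ID part of the type, rather than a convention hung on a bare uint8_t[3]. A self-contained sketch of the big-endian form and its 24-bit value, with stand-in names:

#include <stdint.h>
#include <stdio.h>

typedef struct {    /* wire order: domain is the most significant byte */
    uint8_t domain;
    uint8_t area;
    uint8_t al_pa;
} be_id;

static uint32_t be_id_to_u24(be_id id)
{
    return ((uint32_t)id.domain << 16) | ((uint32_t)id.area << 8) | id.al_pa;
}

int main(void)
{
    be_id id = { .domain = 0x01, .area = 0x02, .al_pa = 0x03 };

    printf("N_Port ID = %06x\n", (unsigned)be_id_to_u24(id)); /* 010203 */
    return 0;
}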
+ */ void (*put_fn)(struct kref *kref); } srb_t; #define GET_CMD_SP(sp) (sp->u.scmd.cmd) -#define SET_CMD_SP(sp, cmd) (sp->u.scmd.cmd = cmd) -#define GET_CMD_CTX_SP(sp) (sp->u.scmd.ctx) #define GET_CMD_SENSE_LEN(sp) \ (sp->u.scmd.request_sense_length) @@ -921,6 +994,11 @@ struct mbx_cmd_32 { #define MBS_LINK_DOWN_ERROR 0x400B #define MBS_DIAG_ECHO_TEST_ERROR 0x400C +static inline bool qla2xxx_is_valid_mbs(unsigned int mbs) +{ + return MBS_COMMAND_COMPLETE <= mbs && mbs <= MBS_DIAG_ECHO_TEST_ERROR; +} + /* * ISP mailbox asynchronous event status codes */ @@ -1851,7 +1929,7 @@ struct crc_context { uint16_t reserved_2; uint16_t reserved_3; uint32_t reserved_4; - struct dsd64 data_dsd; + struct dsd64 data_dsd[1]; uint32_t reserved_5[2]; uint32_t reserved_6; } nobundling; @@ -1861,7 +1939,7 @@ struct crc_context { uint16_t reserved_1; __le16 dseg_count; /* Data segment count */ uint32_t reserved_2; - struct dsd64 data_dsd; + struct dsd64 data_dsd[1]; struct dsd64 dif_dsd; } bundling; } u; @@ -2203,7 +2281,7 @@ typedef struct { uint8_t fabric_port_name[WWN_SIZE]; uint16_t fp_speed; uint8_t fc4_type; - uint8_t fc4f_nvme; /* nvme fc4 feature bits */ + uint8_t fc4_features; } sw_info_t; /* FCP-4 types */ @@ -2289,22 +2367,6 @@ enum login_state { /* FW control Target side */ DSC_LS_LOGO_PEND, }; -enum fcport_mgt_event { - FCME_RELOGIN = 1, - FCME_RSCN, - FCME_PLOGI_DONE, /* Initiator side sent LLIOCB */ - FCME_PRLI_DONE, - FCME_GNL_DONE, - FCME_GPSC_DONE, - FCME_GPDB_DONE, - FCME_GPNID_DONE, - FCME_GFFID_DONE, - FCME_ADISC_DONE, - FCME_GNNID_DONE, - FCME_GFPNID_DONE, - FCME_ELS_PLOGI_DONE, -}; - enum rscn_addr_format { RSCN_PORT_ADDR, RSCN_AREA_ADDR, @@ -2338,6 +2400,9 @@ typedef struct fc_port { unsigned int query:1; unsigned int id_changed:1; unsigned int scan_needed:1; + unsigned int n2n_flag:1; + unsigned int explicit_logout:1; + unsigned int prli_pend_timer:1; struct completion nvme_del_done; uint32_t nvme_prli_service_param; @@ -2364,6 +2429,7 @@ typedef struct fc_port { struct work_struct free_work; struct work_struct reg_work; uint64_t jiffies_at_registration; + unsigned long prli_expired; struct qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX]; uint16_t tgt_id; @@ -2386,9 +2452,8 @@ typedef struct fc_port { u32 supported_classes; uint8_t fc4_type; - uint8_t fc4f_nvme; + uint8_t fc4_features; uint8_t scan_state; - uint8_t n2n_flag; unsigned long last_queue_full; unsigned long last_ramp_up; @@ -2401,6 +2466,7 @@ typedef struct fc_port { struct qla_tgt_sess *tgt_session; struct ct_sns_desc ct_desc; enum discovery_state disc_state; + atomic_t shadow_disc_state; enum discovery_state next_disc_state; enum login_state fw_login_state; unsigned long dm_login_expire; @@ -2418,11 +2484,15 @@ typedef struct fc_port { u16 n2n_chip_reset; } fc_port_t; +enum { + FC4_PRIORITY_NVME = 1, + FC4_PRIORITY_FCP = 2, +}; + #define QLA_FCPORT_SCAN 1 #define QLA_FCPORT_FOUND 2 struct event_arg { - enum fcport_mgt_event event; fc_port_t *fcport; srb_t *sp; port_id_t id; @@ -2443,6 +2513,19 @@ struct event_arg { extern const char *const port_state_str[5]; +static const char * const port_dstate_str[] = { + "DELETED", + "GNN_ID", + "GNL", + "LOGIN_PEND", + "LOGIN_FAILED", + "GPDB", + "UPD_FCPORT", + "LOGIN_COMPLETE", + "ADISC", + "DELETE_PEND" +}; + /* * FC port flags. 
*/ @@ -2745,7 +2828,7 @@ struct ct_sns_req { /* GA_NXT, GPN_ID, GNN_ID, GFT_ID, GFPN_ID */ struct { uint8_t reserved; - uint8_t port_id[3]; + be_id_t port_id; } port_id; struct { @@ -2764,13 +2847,13 @@ struct ct_sns_req { struct { uint8_t reserved; - uint8_t port_id[3]; + be_id_t port_id; uint8_t fc4_types[32]; } rft_id; struct { uint8_t reserved; - uint8_t port_id[3]; + be_id_t port_id; uint16_t reserved2; uint8_t fc4_feature; uint8_t fc4_type; @@ -2778,7 +2861,7 @@ struct ct_sns_req { struct { uint8_t reserved; - uint8_t port_id[3]; + be_id_t port_id; uint8_t node_name[8]; } rnn_id; @@ -2865,7 +2948,7 @@ struct ct_rsp_hdr { struct ct_sns_gid_pt_data { uint8_t control_byte; - uint8_t port_id[3]; + be_id_t port_id; }; /* It's the same for both GPN_FT and GNN_FT */ @@ -2895,7 +2978,7 @@ struct ct_sns_rsp { union { struct { uint8_t port_type; - uint8_t port_id[3]; + be_id_t port_id; uint8_t port_name[8]; uint8_t sym_port_name_len; uint8_t sym_port_name[255]; @@ -2979,6 +3062,7 @@ enum scan_flags_t { enum fc4type_t { FS_FC4TYPE_FCP = BIT_0, FS_FC4TYPE_NVME = BIT_1, + FS_FCP_IS_N2N = BIT_7, }; struct fab_scan_rp { @@ -3111,7 +3195,7 @@ struct isp_operations { void (*update_fw_options) (struct scsi_qla_host *); int (*load_risc) (struct scsi_qla_host *, uint32_t *); - char * (*pci_info_str) (struct scsi_qla_host *, char *); + char * (*pci_info_str)(struct scsi_qla_host *, char *, size_t); char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t); irq_handler_t intr_handler; @@ -3195,7 +3279,6 @@ enum qla_work_type { QLA_EVT_IDC_ACK, QLA_EVT_ASYNC_LOGIN, QLA_EVT_ASYNC_LOGOUT, - QLA_EVT_ASYNC_LOGOUT_DONE, QLA_EVT_ASYNC_ADISC, QLA_EVT_UEVENT, QLA_EVT_AENFX, @@ -3850,7 +3933,7 @@ struct qla_hw_data { /* NVRAM configuration data */ #define MAX_NVRAM_SIZE 4096 -#define VPD_OFFSET MAX_NVRAM_SIZE / 2 +#define VPD_OFFSET (MAX_NVRAM_SIZE / 2) uint16_t nvram_size; uint16_t nvram_base; void *nvram; @@ -3885,7 +3968,7 @@ struct qla_hw_data { void *sfp_data; dma_addr_t sfp_data_dma; - void *flt; + struct qla_flt_header *flt; dma_addr_t flt_dma; #define XGMAC_DATA_SIZE 4096 @@ -4233,6 +4316,8 @@ struct qla_hw_data { atomic_t nvme_active_aen_cnt; uint16_t nvme_last_rptd_aen; /* Last recorded aen count */ + uint8_t fc4_type_priority; + atomic_t zio_threshold; uint16_t last_zio_threshold; @@ -4337,6 +4422,7 @@ typedef struct scsi_qla_host { #define IOCB_WORK_ACTIVE 31 #define SET_ZIO_THRESHOLD_NEEDED 32 #define ISP_ABORT_TO_ROM 33 +#define VPORT_DELETE 34 unsigned long pci_flags; #define PFLG_DISCONNECTED 0 /* PCI device removed */ @@ -4628,6 +4714,7 @@ struct secure_flash_update_block_pk { #define QLA_SUSPENDED 0x106 #define QLA_BUSY 0x107 #define QLA_ALREADY_REGISTERED 0x109 +#define QLA_OS_TIMER_EXPIRED 0x10a #define NVRAM_DELAY() udelay(10) @@ -4756,6 +4843,26 @@ struct sff_8247_a0 { ha->current_topology == ISP_CFG_N || \ !ha->current_topology) +#define NVME_TYPE(fcport) \ + (fcport->fc4_type & FS_FC4TYPE_NVME) \ + +#define FCP_TYPE(fcport) \ + (fcport->fc4_type & FS_FC4TYPE_FCP) \ + +#define NVME_ONLY_TARGET(fcport) \ + (NVME_TYPE(fcport) && !FCP_TYPE(fcport)) \ + +#define NVME_FCP_TARGET(fcport) \ + (FCP_TYPE(fcport) && NVME_TYPE(fcport)) \ + +#define NVME_TARGET(ha, fcport) \ + ((NVME_FCP_TARGET(fcport) && \ + (ha->fc4_type_priority == FC4_PRIORITY_NVME)) || \ + NVME_ONLY_TARGET(fcport)) \ + +#define PRLI_PHASE(_cls) \ + ((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP)) + #include "qla_target.h" #include "qla_gbl.h" #include "qla_dbg.h" diff --git a/drivers/scsi/qla2xxx/qla_dfs.c 
b/drivers/scsi/qla2xxx/qla_dfs.c index a432caebefec..0a6fb359f4d5 100644 --- a/drivers/scsi/qla2xxx/qla_dfs.c +++ b/drivers/scsi/qla2xxx/qla_dfs.c @@ -57,10 +57,9 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) { scsi_qla_host_t *vha = s->private; struct qla_hw_data *ha = vha->hw; - struct gid_list_info *gid_list; + struct gid_list_info *gid_list, *gid; dma_addr_t gid_list_dma; fc_port_t fc_port; - char *id_iter; int rc, i; uint16_t entries, loop_id; struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; @@ -82,13 +81,11 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) if (rc != QLA_SUCCESS) goto out_free_id_list; - id_iter = (char *)gid_list; + gid = gid_list; seq_puts(s, "Port Name Port ID Loop ID\n"); for (i = 0; i < entries; i++) { - struct gid_list_info *gid = - (struct gid_list_info *)id_iter; loop_id = le16_to_cpu(gid->loop_id); memset(&fc_port, 0, sizeof(fc_port_t)); @@ -99,7 +96,7 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused) fc_port.port_name, fc_port.d_id.b.domain, fc_port.d_id.b.area, fc_port.d_id.b.al_pa, fc_port.loop_id); - id_iter += ha->gid_list_info_size; + gid = (void *)gid + ha->gid_list_info_size; } out_free_id_list: dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha), diff --git a/drivers/scsi/qla2xxx/qla_dsd.h b/drivers/scsi/qla2xxx/qla_dsd.h index 7479924ba422..20788054b91b 100644 --- a/drivers/scsi/qla2xxx/qla_dsd.h +++ b/drivers/scsi/qla2xxx/qla_dsd.h @@ -1,6 +1,8 @@ #ifndef _QLA_DSD_H_ #define _QLA_DSD_H_ +#include <asm/unaligned.h> + /* 32-bit data segment descriptor (8 bytes) */ struct dsd32 { __le32 address; diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h index df079a8c2b33..d641918cdd46 100644 --- a/drivers/scsi/qla2xxx/qla_fw.h +++ b/drivers/scsi/qla2xxx/qla_fw.h @@ -761,13 +761,13 @@ struct els_entry_24xx { #define ECF_CLR_PASSTHRU_PEND BIT_12 #define ECF_INCL_FRAME_HDR BIT_11 - uint32_t rx_byte_count; - uint32_t tx_byte_count; + __le32 rx_byte_count; + __le32 tx_byte_count; __le64 tx_address __packed; /* Data segment 0 address. */ - uint32_t tx_len; /* Data segment 0 length. */ + __le32 tx_len; /* Data segment 0 length. */ __le64 rx_address __packed; /* Data segment 1 address. */ - uint32_t rx_len; /* Data segment 1 length. */ + __le32 rx_len; /* Data segment 1 length. 
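The qla_dfs.c hunk above advances the gid cursor by ha->gid_list_info_size, a firmware-reported stride, rather than by sizeof(*gid); the kernel code leans on GNU void-pointer arithmetic for this, while portable C needs a char * cast. A sketch of walking such variable-stride records:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct gid_info {
    uint16_t loop_id;    /* header; firmware appends (stride - 2) more bytes */
};

static void walk_gid_list(void *list, size_t entries, size_t stride)
{
    struct gid_info *gid = list;
    size_t i;

    for (i = 0; i < entries; i++) {
        printf("entry %zu: loop_id=%u\n", i, (unsigned)gid->loop_id);
        gid = (struct gid_info *)((char *)gid + stride);
    }
}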
*/ }; struct els_sts_entry_24xx { @@ -1354,12 +1354,12 @@ struct vp_rpt_id_entry_24xx { uint8_t port_id[3]; uint8_t format; union { - struct { + struct _f0 { /* format 0 loop */ uint8_t vp_idx_map[16]; uint8_t reserved_4[32]; } f0; - struct { + struct _f1 { /* format 1 fabric */ uint8_t vpstat1_subcode; /* vp_status=1 subcode */ uint8_t flags; @@ -1381,21 +1381,22 @@ struct vp_rpt_id_entry_24xx { uint16_t bbcr; uint8_t reserved_5[6]; } f1; - struct { /* format 2: N2N direct connect */ - uint8_t vpstat1_subcode; - uint8_t flags; - uint16_t rsv6; - uint8_t rsv2[12]; - - uint8_t ls_rjt_vendor; - uint8_t ls_rjt_explanation; - uint8_t ls_rjt_reason; - uint8_t rsv3[5]; - - uint8_t port_name[8]; - uint8_t node_name[8]; - uint8_t remote_nport_id[4]; - uint32_t reserved_5; + struct _f2 { /* format 2: N2N direct connect */ + uint8_t vpstat1_subcode; + uint8_t flags; + uint16_t fip_flags; + uint8_t rsv2[12]; + + uint8_t ls_rjt_vendor; + uint8_t ls_rjt_explanation; + uint8_t ls_rjt_reason; + uint8_t rsv3[5]; + + uint8_t port_name[8]; + uint8_t node_name[8]; + uint16_t bbcr; + uint8_t reserved_5[2]; + uint8_t remote_nport_id[4]; } f2; } u; }; @@ -1470,13 +1471,6 @@ struct qla_flt_location { uint16_t checksum; }; -struct qla_flt_header { - uint16_t version; - uint16_t length; - uint16_t checksum; - uint16_t unused; -}; - #define FLT_REG_FW 0x01 #define FLT_REG_BOOT_CODE 0x07 #define FLT_REG_VPD_0 0x14 @@ -1523,6 +1517,10 @@ struct qla_flt_header { #define FLT_REG_NVRAM_SEC_28XX_1 0x10F #define FLT_REG_NVRAM_SEC_28XX_2 0x111 #define FLT_REG_NVRAM_SEC_28XX_3 0x113 +#define FLT_REG_MPI_PRI_28XX 0xD3 +#define FLT_REG_MPI_SEC_28XX 0xF0 +#define FLT_REG_PEP_PRI_28XX 0xD1 +#define FLT_REG_PEP_SEC_28XX 0xF1 struct qla_flt_region { uint16_t code; @@ -1533,6 +1531,14 @@ struct qla_flt_region { uint32_t end; }; +struct qla_flt_header { + uint16_t version; + uint16_t length; + uint16_t checksum; + uint16_t unused; + struct qla_flt_region region[0]; +}; + #define FLT_REGION_SIZE 16 #define FLT_MAX_REGIONS 0xFF #define FLT_REGIONS_SIZE (FLT_REGION_SIZE * FLT_MAX_REGIONS) @@ -2101,4 +2107,6 @@ struct qla_fcp_prio_cfg { #define FA_FLASH_LAYOUT_ADDR_83 (0x3F1000/4) #define FA_FLASH_LAYOUT_ADDR_28 (0x11000/4) +#define NVRAM_DUAL_FCP_NVME_FLAG_OFFSET 0x196 + #endif diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f9669fdf7798..2a64729a2bc5 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -45,6 +45,8 @@ extern int qla2x00_local_device_login(scsi_qla_host_t *, fc_port_t *); extern int qla24xx_els_dcmd_iocb(scsi_qla_host_t *, int, port_id_t); extern int qla24xx_els_dcmd2_iocb(scsi_qla_host_t *, int, fc_port_t *, bool); +extern void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, + struct els_plogi *els_plogi); extern void qla2x00_update_fcports(scsi_qla_host_t *); @@ -70,14 +72,13 @@ extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t); extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern void qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *, - uint16_t *); struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, enum qla_work_type); extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); int qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e); extern void *qla2x00_alloc_iocbs_ready(struct qla_qpair *, srb_t *); extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t 
*); +extern int qla24xx_async_abort_cmd(srb_t *, bool); extern void qla2x00_set_fcport_state(fc_port_t *fcport, int state); extern fc_port_t * @@ -96,7 +97,11 @@ extern int qla2x00_init_rings(scsi_qla_host_t *); extern struct qla_qpair *qla2xxx_create_qpair(struct scsi_qla_host *, int, int, bool); extern int qla2xxx_delete_qpair(struct scsi_qla_host *, struct qla_qpair *); -void qla2x00_fcport_event_handler(scsi_qla_host_t *, struct event_arg *); +void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea); +void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, + struct event_arg *ea); +void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, + struct event_arg *ea); int qla24xx_async_gpdb(struct scsi_qla_host *, fc_port_t *, u8); int qla24xx_async_prli(struct scsi_qla_host *, fc_port_t *); int qla24xx_async_notify_ack(scsi_qla_host_t *, fc_port_t *, @@ -176,8 +181,6 @@ extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *, - fc_port_t *, uint16_t *); extern int qla2x00_post_async_adisc_work(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_post_async_adisc_done_work(struct scsi_qla_host *, @@ -195,6 +198,7 @@ extern void qla2x00_free_host(struct scsi_qla_host *); extern void qla2x00_relogin(struct scsi_qla_host *); extern void qla2x00_do_work(struct scsi_qla_host *); extern void qla2x00_free_fcports(struct scsi_qla_host *); +extern void qla2x00_free_fcport(fc_port_t *); extern void qla83xx_schedule_work(scsi_qla_host_t *, int); extern void qla83xx_service_idc_aen(struct work_struct *); @@ -213,9 +217,9 @@ extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern int qla2x00_post_uevent_work(struct scsi_qla_host *, u32); extern void qla2x00_disable_board_on_pci_error(struct work_struct *); -extern void qla2x00_sp_compl(void *, int); -extern void qla2xxx_qpair_sp_free_dma(void *); -extern void qla2xxx_qpair_sp_compl(void *, int); +extern void qla2x00_sp_compl(srb_t *sp, int); +extern void qla2xxx_qpair_sp_free_dma(srb_t *sp); +extern void qla2xxx_qpair_sp_compl(srb_t *sp, int); extern void qla24xx_sched_upd_fcport(fc_port_t *); void qla2x00_handle_login_done_event(struct scsi_qla_host *, fc_port_t *, uint16_t *); @@ -244,11 +248,12 @@ extern void qla2x00_do_dpc_all_vps(scsi_qla_host_t *); extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *); -extern void qla2x00_sp_free_dma(void *); +extern void qla2x00_sp_free_dma(srb_t *sp); extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); -extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int, int); -extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *, int); +extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int); +extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *); +extern int qla24xx_async_abort_cmd(srb_t *, bool); extern struct fw_blob *qla2x00_request_firmware(scsi_qla_host_t *); @@ -272,6 +277,7 @@ extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); extern void qla2x00_build_scsi_iocbs_64(srb_t *, cmd_entry_t *, uint16_t); extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t, struct req_que *); +extern uint32_t qla2xxx_get_next_handle(struct req_que *req); extern int qla2x00_start_scsi(srb_t *sp); 
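The prototype changes above (qla2x00_sp_compl, the bsg hooks, and friends) replace void * callback parameters with srb_t *, turning every mismatched caller into a compile-time error instead of a silent bad cast at runtime. A small before/after sketch with stand-in types:

#include <stdio.h>

typedef struct srb { int handle; } srb_t;

typedef void (*done_fn_old)(void *sp, int res);   /* accepts anything    */
typedef void (*done_fn_new)(srb_t *sp, int res);  /* checked by compiler */

static void sp_done(srb_t *sp, int res)
{
    printf("sp %d completed, res=%d\n", sp->handle, res);
}

int main(void)
{
    done_fn_new done = sp_done;    /* no cast; the types must line up */
    srb_t sp = { 42 };

    done(&sp, 0);
    return 0;
}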
extern int qla24xx_start_scsi(srb_t *sp); int qla2x00_marker(struct scsi_qla_host *, struct qla_qpair *, @@ -554,7 +560,7 @@ fc_port_t *qla2x00_find_fcport_by_nportid(scsi_qla_host_t *, port_id_t *, u8); * Global Function Prototypes in qla_sup.c source file. */ extern void qla2x00_release_nvram_protection(scsi_qla_host_t *); -extern uint32_t *qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, +extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, uint32_t, uint32_t); extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t, uint32_t); @@ -630,7 +636,7 @@ extern ulong qla27xx_fwdt_template_size(void *); extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); extern void ql_dump_regs(uint, scsi_qla_host_t *, uint); -extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, void *, uint); +extern void ql_dump_buffer(uint, scsi_qla_host_t *, uint, const void *, uint); /* * Global Function Prototypes in qla_gs.c source file. */ @@ -732,7 +738,7 @@ extern int qlafx00_initialize_adapter(struct scsi_qla_host *); extern int qlafx00_soft_reset(scsi_qla_host_t *); extern int qlafx00_chip_diag(scsi_qla_host_t *); extern void qlafx00_config_rings(struct scsi_qla_host *); -extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *); +extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *, size_t); extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t); extern irqreturn_t qlafx00_intr_handler(int, void *); extern void qlafx00_enable_intrs(struct qla_hw_data *); @@ -790,10 +796,10 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *); /* IOCB related functions */ extern int qla82xx_start_scsi(srb_t *); -extern void qla2x00_sp_free(void *); +extern void qla2x00_sp_free(srb_t *sp); extern void qla2x00_sp_timeout(struct timer_list *); -extern void qla2x00_bsg_job_done(void *, int); -extern void qla2x00_bsg_sp_free(void *); +extern void qla2x00_bsg_job_done(srb_t *sp, int); +extern void qla2x00_bsg_sp_free(srb_t *sp); extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *); /* Interrupt related */ @@ -822,8 +828,8 @@ extern int qla82xx_device_state_handler(scsi_qla_host_t *); extern void qla8xxx_dev_failed_handler(scsi_qla_host_t *); extern void qla82xx_clear_qsnt_ready(scsi_qla_host_t *); -extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, - size_t, char *); +extern void qla2x00_set_model_info(scsi_qla_host_t *, uint8_t *, size_t, + const char *); extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); extern void qla82xx_start_iocbs(scsi_qla_host_t *); @@ -910,4 +916,5 @@ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode); /* nvme.c */ void qla_nvme_unregister_remote_port(struct fc_port *fcport); +void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea); #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c index 9f58e591666d..aaa4a5bbf2ff 100644 --- a/drivers/scsi/qla2xxx/qla_gs.c +++ b/drivers/scsi/qla2xxx/qla_gs.c @@ -226,9 +226,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ - ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; - ct_req->req.port_id.port_id[1] = fcport->d_id.b.area; - ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; + ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, @@ 
-242,9 +240,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) rval = QLA_FUNCTION_FAILED; } else { /* Populate fc_port_t entry. */ - fcport->d_id.b.domain = ct_rsp->rsp.ga_nxt.port_id[0]; - fcport->d_id.b.area = ct_rsp->rsp.ga_nxt.port_id[1]; - fcport->d_id.b.al_pa = ct_rsp->rsp.ga_nxt.port_id[2]; + fcport->d_id = be_to_port_id(ct_rsp->rsp.ga_nxt.port_id); memcpy(fcport->node_name, ct_rsp->rsp.ga_nxt.node_name, WWN_SIZE); @@ -252,7 +248,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport) WWN_SIZE); fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ? - FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER; + FS_FC4TYPE_FCP : FC4_TYPE_OTHER; if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE && ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE) @@ -337,9 +333,7 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list) /* Set port IDs in switch info list. */ for (i = 0; i < ha->max_fibre_devices; i++) { gid_data = &ct_rsp->rsp.gid_pt.entries[i]; - list[i].d_id.b.domain = gid_data->port_id[0]; - list[i].d_id.b.area = gid_data->port_id[1]; - list[i].d_id.b.al_pa = gid_data->port_id[2]; + list[i].d_id = be_to_port_id(gid_data->port_id); memset(list[i].fabric_port_name, 0, WWN_SIZE); list[i].fp_speed = PORT_SPEED_UNKNOWN; @@ -403,9 +397,7 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list) ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ - ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; - ct_req->req.port_id.port_id[1] = list[i].d_id.b.area; - ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; + ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, @@ -472,9 +464,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ - ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; - ct_req->req.port_id.port_id[1] = list[i].d_id.b.area; - ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; + ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, @@ -509,9 +499,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list) return (rval); } -static void qla2x00_async_sns_sp_done(void *s, int rc) +static void qla2x00_async_sns_sp_done(srb_t *sp, int rc) { - struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; struct ct_sns_pkt *ct_sns; struct qla_work_evt *e; @@ -639,9 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id) ct_req = qla2x00_prep_ct_req(ct_sns, RFT_ID_CMD, RFT_ID_RSP_SIZE); /* Prepare CT arguments -- port_id, FC-4 types */ - ct_req->req.rft_id.port_id[0] = vha->d_id.b.domain; - ct_req->req.rft_id.port_id[1] = vha->d_id.b.area; - ct_req->req.rft_id.port_id[2] = vha->d_id.b.al_pa; + ct_req->req.rft_id.port_id = port_id_to_be_id(vha->d_id); ct_req->req.rft_id.fc4_types[2] = 0x01; /* FCP-3 */ if (vha->flags.nvme_enabled) @@ -737,9 +724,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id, ct_req = qla2x00_prep_ct_req(ct_sns, RFF_ID_CMD, RFF_ID_RSP_SIZE); /* Prepare CT arguments -- port_id, FC-4 feature, FC-4 type */ - ct_req->req.rff_id.port_id[0] = d_id->b.domain; - ct_req->req.rff_id.port_id[1] = d_id->b.area; - ct_req->req.rff_id.port_id[2] = d_id->b.al_pa; + ct_req->req.rff_id.port_id = port_id_to_be_id(*d_id); ct_req->req.rff_id.fc4_feature = fc4feature; ct_req->req.rff_id.fc4_type = fc4type; /* SCSI - FCP */ @@ -830,9 +815,7 @@ static int 
qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id, ct_req = qla2x00_prep_ct_req(ct_sns, RNN_ID_CMD, RNN_ID_RSP_SIZE); /* Prepare CT arguments -- port_id, node_name */ - ct_req->req.rnn_id.port_id[0] = vha->d_id.b.domain; - ct_req->req.rnn_id.port_id[1] = vha->d_id.b.area; - ct_req->req.rnn_id.port_id[2] = vha->d_id.b.al_pa; + ct_req->req.rnn_id.port_id = port_id_to_be_id(vha->d_id); memcpy(ct_req->req.rnn_id.node_name, vha->node_name, WWN_SIZE); sp->u.iocb_cmd.u.ctarg.req_size = RNN_ID_REQ_SIZE; @@ -1479,7 +1462,7 @@ qla24xx_prep_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size, return ct_pkt; } -static inline ms_iocb_entry_t * +static void qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size) { struct qla_hw_data *ha = vha->hw; @@ -1493,8 +1476,6 @@ qla2x00_update_ms_fdmi_iocb(scsi_qla_host_t *vha, uint32_t req_size) ms_pkt->req_bytecount = cpu_to_le32(req_size); ms_pkt->req_dsd.length = ms_pkt->req_bytecount; } - - return ms_pkt; } /** @@ -1557,7 +1538,7 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha) /* Attributes */ ct_req->req.rhba.attrs.count = cpu_to_be32(FDMI_HBA_ATTR_COUNT); - entries = ct_req->req.rhba.hba_identifier; + entries = &ct_req->req; /* Nodename. */ eiter = entries + size; @@ -1766,7 +1747,7 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha) /* Attributes */ ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT); - entries = ct_req->req.rpa.port_name; + entries = &ct_req->req; /* FC4 types. */ eiter = entries + size; @@ -1979,7 +1960,7 @@ qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha) /* Attributes */ ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT); - entries = ct_req->req.rhba2.hba_identifier; + entries = &ct_req->req; /* Nodename. */ eiter = entries + size; @@ -2338,7 +2319,7 @@ qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha) /* Attributes */ ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT); - entries = ct_req->req.rpa2.port_name; + entries = &ct_req->req; /* FC4 types. 
*/ eiter = entries + size; @@ -2730,9 +2711,7 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list) ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ - ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; - ct_req->req.port_id.port_id[1] = list[i].d_id.b.area; - ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; + ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, @@ -2908,7 +2887,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) struct ct_sns_req *ct_req; struct ct_sns_rsp *ct_rsp; struct qla_hw_data *ha = vha->hw; - uint8_t fcp_scsi_features = 0; + uint8_t fcp_scsi_features = 0, nvme_features = 0; struct ct_arg arg; for (i = 0; i < ha->max_fibre_devices; i++) { @@ -2936,9 +2915,7 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) ct_rsp = &ha->ct_sns->p.rsp; /* Prepare CT arguments -- port_id */ - ct_req->req.port_id.port_id[0] = list[i].d_id.b.domain; - ct_req->req.port_id.port_id[1] = list[i].d_id.b.area; - ct_req->req.port_id.port_id[2] = list[i].d_id.b.al_pa; + ct_req->req.port_id.port_id = port_id_to_be_id(list[i].d_id); /* Execute MS IOCB */ rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma, @@ -2956,14 +2933,19 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list) ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; fcp_scsi_features &= 0x0f; - if (fcp_scsi_features) - list[i].fc4_type = FC4_TYPE_FCP_SCSI; - else - list[i].fc4_type = FC4_TYPE_OTHER; + if (fcp_scsi_features) { + list[i].fc4_type = FS_FC4TYPE_FCP; + list[i].fc4_features = fcp_scsi_features; + } - list[i].fc4f_nvme = + nvme_features = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; - list[i].fc4f_nvme &= 0xf; + nvme_features &= 0xf; + + if (nvme_features) { + list[i].fc4_type |= FS_FC4TYPE_NVME; + list[i].fc4_features = nvme_features; + } } /* Last device exit. 
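In qla2x00_gff_id() above each FC-4 type owns a nibble of the GFF_ID feature bytes, and a nonzero nibble now sets an independent FS_FC4TYPE_* bit, so a port can be recorded as FCP, NVMe, or both. A sketch of that parsing with illustrative offsets and flag values:

#include <stdint.h>

enum {  /* illustrative flags mirroring FS_FC4TYPE_FCP / FS_FC4TYPE_NVME */
    TYPE_FCP  = 1u << 0,
    TYPE_NVME = 1u << 1,
};

static uint8_t parse_fc4_types(const uint8_t *features,
                               unsigned fcp_off, unsigned nvme_off)
{
    uint8_t types = 0;

    if (features[fcp_off] & 0x0f)     /* low nibble: FCP features  */
        types |= TYPE_FCP;
    if (features[nvme_off] & 0x0f)    /* low nibble: NVMe features */
        types |= TYPE_NVME;
    return types;
}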
*/ @@ -2981,7 +2963,6 @@ int qla24xx_post_gpsc_work(struct scsi_qla_host *vha, fc_port_t *fcport) return QLA_FUNCTION_FAILED; e->u.fcport.fcport = fcport; - fcport->flags |= FCF_ASYNC_ACTIVE; return qla2x00_post_work(vha, e); } @@ -3011,9 +2992,8 @@ void qla24xx_handle_gpsc_event(scsi_qla_host_t *vha, struct event_arg *ea) qla_post_iidma_work(vha, fcport); } -static void qla24xx_async_gpsc_sp_done(void *s, int res) +static void qla24xx_async_gpsc_sp_done(srb_t *sp, int res) { - struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; fc_port_t *fcport = sp->fcport; @@ -3029,7 +3009,7 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); if (res == QLA_FUNCTION_TIMEOUT) - return; + goto done; if (res == (DID_ERROR << 16)) { /* entry status error */ @@ -3055,11 +3035,10 @@ static void qla24xx_async_gpsc_sp_done(void *s, int res) be16_to_cpu(ct_rsp->rsp.gpsc.speed)); } memset(&ea, 0, sizeof(ea)); - ea.event = FCME_GPSC_DONE; ea.rc = res; ea.fcport = fcport; ea.sp = sp; - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_gpsc_event(vha, &ea); done: sp->free(sp); @@ -3117,9 +3096,7 @@ int qla24xx_async_gpsc(scsi_qla_host_t *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; done: - fcport->flags &= ~FCF_ASYNC_ACTIVE; return rval; } @@ -3127,7 +3104,8 @@ int qla24xx_post_gpnid_work(struct scsi_qla_host *vha, port_id_t *id) { struct qla_work_evt *e; - if (test_bit(UNLOADING, &vha->dpc_flags)) + if (test_bit(UNLOADING, &vha->dpc_flags) || + (vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags))) return 0; e = qla2x00_alloc_work(vha, QLA_EVT_GPNID); @@ -3144,17 +3122,7 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp) switch (sp->type) { case SRB_ELS_DCMD: - if (c->u.els_plogi.els_plogi_pyld) - dma_free_coherent(&vha->hw->pdev->dev, - c->u.els_plogi.tx_size, - c->u.els_plogi.els_plogi_pyld, - c->u.els_plogi.els_plogi_pyld_dma); - - if (c->u.els_plogi.els_resp_pyld) - dma_free_coherent(&vha->hw->pdev->dev, - c->u.els_plogi.rx_size, - c->u.els_plogi.els_resp_pyld, - c->u.els_plogi.els_resp_pyld_dma); + qla2x00_els_dcmd2_free(vha, &c->u.els_plogi); break; case SRB_CT_PTHRU_CMD: default: @@ -3280,9 +3248,8 @@ void qla24xx_handle_gpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) } } -static void qla2x00_async_gpnid_sp_done(void *s, int res) +static void qla2x00_async_gpnid_sp_done(srb_t *sp, int res) { - struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; struct ct_sns_req *ct_req = (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; @@ -3295,22 +3262,19 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) if (res) ql_dbg(ql_dbg_disc, vha, 0x2066, "Async done-%s fail res %x rscn gen %d ID %3phC. %8phC\n", - sp->name, res, sp->gen1, ct_req->req.port_id.port_id, + sp->name, res, sp->gen1, &ct_req->req.port_id.port_id, ct_rsp->rsp.gpn_id.port_name); else ql_dbg(ql_dbg_disc, vha, 0x2066, "Async done-%s good rscn gen %d ID %3phC. 
%8phC\n", - sp->name, sp->gen1, ct_req->req.port_id.port_id, + sp->name, sp->gen1, &ct_req->req.port_id.port_id, ct_rsp->rsp.gpn_id.port_name); memset(&ea, 0, sizeof(ea)); memcpy(ea.port_name, ct_rsp->rsp.gpn_id.port_name, WWN_SIZE); ea.sp = sp; - ea.id.b.domain = ct_req->req.port_id.port_id[0]; - ea.id.b.area = ct_req->req.port_id.port_id[1]; - ea.id.b.al_pa = ct_req->req.port_id.port_id[2]; + ea.id = be_to_port_id(ct_req->req.port_id.port_id); ea.rc = res; - ea.event = FCME_GPNID_DONE; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); list_del(&sp->elem); @@ -3329,25 +3293,22 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res) return; } - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_gpnid_event(vha, &ea); e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); if (!e) { /* please ignore kernel warning. otherwise, we have mem leak. */ - if (sp->u.iocb_cmd.u.ctarg.req) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.req_allocated_size, - sp->u.iocb_cmd.u.ctarg.req, - sp->u.iocb_cmd.u.ctarg.req_dma); - sp->u.iocb_cmd.u.ctarg.req = NULL; - } - if (sp->u.iocb_cmd.u.ctarg.rsp) { - dma_free_coherent(&vha->hw->pdev->dev, - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, - sp->u.iocb_cmd.u.ctarg.rsp, - sp->u.iocb_cmd.u.ctarg.rsp_dma); - sp->u.iocb_cmd.u.ctarg.rsp = NULL; - } + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.req_allocated_size, + sp->u.iocb_cmd.u.ctarg.req, + sp->u.iocb_cmd.u.ctarg.req_dma); + sp->u.iocb_cmd.u.ctarg.req = NULL; + + dma_free_coherent(&vha->hw->pdev->dev, + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size, + sp->u.iocb_cmd.u.ctarg.rsp, + sp->u.iocb_cmd.u.ctarg.rsp_dma); + sp->u.iocb_cmd.u.ctarg.rsp = NULL; sp->free(sp); return; @@ -3419,9 +3380,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) ct_req = qla2x00_prep_ct_req(ct_sns, GPN_ID_CMD, GPN_ID_RSP_SIZE); /* GPN_ID req */ - ct_req->req.port_id.port_id[0] = id->b.domain; - ct_req->req.port_id.port_id[1] = id->b.area; - ct_req->req.port_id.port_id[2] = id->b.al_pa; + ct_req->req.port_id.port_id = port_id_to_be_id(*id); sp->u.iocb_cmd.u.ctarg.req_size = GPN_ID_REQ_SIZE; sp->u.iocb_cmd.u.ctarg.rsp_size = GPN_ID_RSP_SIZE; @@ -3432,7 +3391,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id) ql_dbg(ql_dbg_disc, vha, 0x2067, "Async-%s hdl=%x ID %3phC.\n", sp->name, - sp->handle, ct_req->req.port_id.port_id); + sp->handle, &ct_req->req.port_id.port_id); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) @@ -3467,54 +3426,55 @@ done: void qla24xx_handle_gffid_event(scsi_qla_host_t *vha, struct event_arg *ea) { - fc_port_t *fcport = ea->fcport; + fc_port_t *fcport = ea->fcport; - qla24xx_post_gnl_work(vha, fcport); + qla24xx_post_gnl_work(vha, fcport); } -void qla24xx_async_gffid_sp_done(void *s, int res) +void qla24xx_async_gffid_sp_done(srb_t *sp, int res) { - struct srb *sp = s; - struct scsi_qla_host *vha = sp->vha; - fc_port_t *fcport = sp->fcport; - struct ct_sns_rsp *ct_rsp; - struct event_arg ea; - - ql_dbg(ql_dbg_disc, vha, 0x2133, - "Async done-%s res %x ID %x. 
%8phC\n", - sp->name, res, fcport->d_id.b24, fcport->port_name); - - fcport->flags &= ~FCF_ASYNC_SENT; - ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; - /* - * FC-GS-7, 5.2.3.12 FC-4 Features - format - * The format of the FC-4 Features object, as defined by the FC-4, - * Shall be an array of 4-bit values, one for each type code value - */ - if (!res) { - if (ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET] & 0xf) { - /* w1 b00:03 */ - fcport->fc4_type = - ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; - fcport->fc4_type &= 0xf; - } - - if (ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET] & 0xf) { - /* w5 [00:03]/28h */ - fcport->fc4f_nvme = - ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; - fcport->fc4f_nvme &= 0xf; - } - } - - memset(&ea, 0, sizeof(ea)); - ea.sp = sp; - ea.fcport = sp->fcport; - ea.rc = res; - ea.event = FCME_GFFID_DONE; - - qla2x00_fcport_event_handler(vha, &ea); - sp->free(sp); + struct scsi_qla_host *vha = sp->vha; + fc_port_t *fcport = sp->fcport; + struct ct_sns_rsp *ct_rsp; + struct event_arg ea; + uint8_t fc4_scsi_feat; + uint8_t fc4_nvme_feat; + + ql_dbg(ql_dbg_disc, vha, 0x2133, + "Async done-%s res %x ID %x. %8phC\n", + sp->name, res, fcport->d_id.b24, fcport->port_name); + + fcport->flags &= ~FCF_ASYNC_SENT; + ct_rsp = &fcport->ct_desc.ct_sns->p.rsp; + fc4_scsi_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET]; + fc4_nvme_feat = ct_rsp->rsp.gff_id.fc4_features[GFF_NVME_OFFSET]; + + /* + * FC-GS-7, 5.2.3.12 FC-4 Features - format + * The format of the FC-4 Features object, as defined by the FC-4, + * Shall be an array of 4-bit values, one for each type code value + */ + if (!res) { + if (fc4_scsi_feat & 0xf) { + /* w1 b00:03 */ + fcport->fc4_type = FS_FC4TYPE_FCP; + fcport->fc4_features = fc4_scsi_feat & 0xf; + } + + if (fc4_nvme_feat & 0xf) { + /* w5 [00:03]/28h */ + fcport->fc4_type |= FS_FC4TYPE_NVME; + fcport->fc4_features = fc4_nvme_feat & 0xf; + } + } + + memset(&ea, 0, sizeof(ea)); + ea.sp = sp; + ea.fcport = sp->fcport; + ea.rc = res; + + qla24xx_handle_gffid_event(vha, &ea); + sp->free(sp); } /* Get FC4 Feature with Nport ID. */ @@ -3608,7 +3568,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) u8 recheck = 0; u16 dup = 0, dup_cnt = 0; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s enter\n", __func__); if (sp->gen1 != vha->hw->base_qpair->chip_reset) { @@ -3624,11 +3584,23 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); + goto out; } else { ql_dbg(ql_dbg_disc, vha, 0xffff, - "Fabric scan failed on all retries.\n"); + "%s: Fabric scan failed for %d retries.\n", + __func__, vha->scan.scan_retry); + /* + * Unable to scan any rports. logout loop below + * will unregister all sessions. 
+ */ + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if ((fcport->flags & FCF_FABRIC_DEVICE) != 0) { + fcport->scan_state = QLA_FCPORT_SCAN; + fcport->logout_on_delete = 0; + } + } + goto login_logout; } - goto out; } vha->scan.scan_retry = 0; @@ -3674,7 +3646,6 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) list_for_each_entry(fcport, &vha->vp_fcports, list) { if (memcmp(rp->port_name, fcport->port_name, WWN_SIZE)) continue; - fcport->scan_needed = 0; fcport->scan_state = QLA_FCPORT_FOUND; found = true; /* @@ -3683,10 +3654,12 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) { qla2x00_clear_loop_id(fcport); fcport->flags |= FCF_FABRIC_DEVICE; - } else if (fcport->d_id.b24 != rp->id.b24) { + } else if (fcport->d_id.b24 != rp->id.b24 || + fcport->scan_needed) { qlt_schedule_sess_for_deletion(fcport); } fcport->d_id.b24 = rp->id.b24; + fcport->scan_needed = 0; break; } @@ -3705,6 +3678,7 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp) dup_cnt); } +login_logout: /* * Logout all previous fabric dev marked lost, except FCP2 devices. */ @@ -3898,9 +3872,8 @@ static void qla2x00_find_free_fcp_nvme_slot(struct scsi_qla_host *vha, } } -static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res) +static void qla2x00_async_gpnft_gnnft_sp_done(srb_t *sp, int res) { - struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; struct ct_sns_req *ct_req = (struct ct_sns_req *)sp->u.iocb_cmd.u.ctarg.req; @@ -4053,9 +4026,6 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp, rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - spin_unlock_irqrestore(&vha->work_lock, flags); goto done_free_sp; } @@ -4079,12 +4049,23 @@ done_free_sp: sp->free(sp); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + if (vha->scan.scan_flags == 0) { + ql_dbg(ql_dbg_disc, vha, 0xffff, + "%s: schedule\n", __func__); + vha->scan.scan_flags |= SF_QUEUED; + schedule_delayed_work(&vha->scan.scan_work, 5); + } + spin_unlock_irqrestore(&vha->work_lock, flags); + + return rval; } /* GNNFT */ void qla24xx_async_gpnft_done(scsi_qla_host_t *vha, srb_t *sp) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s enter\n", __func__); qla24xx_async_gnnft(vha, sp, sp->gen2); } @@ -4098,7 +4079,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) u32 rspsz; unsigned long flags; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s enter\n", __func__); if (!vha->flags.online) @@ -4107,14 +4088,15 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) spin_lock_irqsave(&vha->work_lock, flags); if (vha->scan.scan_flags & SF_SCANNING) { spin_unlock_irqrestore(&vha->work_lock, flags); - ql_dbg(ql_dbg_disc, vha, 0xffff, "scan active\n"); + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s: scan active\n", __func__); return rval; } vha->scan.scan_flags |= SF_SCANNING; spin_unlock_irqrestore(&vha->work_lock, flags); if (fc4_type == FC4_TYPE_FCP_SCSI) { - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s: Performing FCP Scan\n", __func__); if (sp) @@ -4152,7 +4134,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) rspsz, &sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL); - sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = 
sizeof(struct ct_sns_pkt); + sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = rspsz; if (!sp->u.iocb_cmd.u.ctarg.rsp) { ql_log(ql_log_warn, vha, 0xffff, "Failed to allocate ct_sns request.\n"); @@ -4169,7 +4151,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) } sp->u.iocb_cmd.u.ctarg.rsp_size = rspsz; - ql_dbg(ql_dbg_disc, vha, 0xffff, + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, "%s scan list size %d\n", __func__, vha->scan.size); memset(vha->scan.l, 0, vha->scan.size); @@ -4208,9 +4190,6 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp) rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { - spin_lock_irqsave(&vha->work_lock, flags); - vha->scan.scan_flags &= ~SF_SCANNING; - spin_unlock_irqrestore(&vha->work_lock, flags); goto done_free_sp; } @@ -4234,6 +4213,17 @@ done_free_sp: sp->free(sp); + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_flags &= ~SF_SCANNING; + if (vha->scan.scan_flags == 0) { + ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0xffff, + "%s: Scan scheduled.\n", __func__); + vha->scan.scan_flags |= SF_QUEUED; + schedule_delayed_work(&vha->scan.scan_work, 5); + } + spin_unlock_irqrestore(&vha->work_lock, flags); + + return rval; } @@ -4261,9 +4251,8 @@ void qla24xx_handle_gnnid_event(scsi_qla_host_t *vha, struct event_arg *ea) qla24xx_post_gnl_work(vha, ea->fcport); } -static void qla2x00_async_gnnid_sp_done(void *s, int res) +static void qla2x00_async_gnnid_sp_done(srb_t *sp, int res) { - struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; fc_port_t *fcport = sp->fcport; u8 *node_name = fcport->ct_desc.ct_sns->p.rsp.rsp.gnn_id.node_name; @@ -4279,13 +4268,12 @@ static void qla2x00_async_gnnid_sp_done(void *s, int res) ea.fcport = fcport; ea.sp = sp; ea.rc = res; - ea.event = FCME_GNNID_DONE; ql_dbg(ql_dbg_disc, vha, 0x204f, "Async done-%s res %x, WWPN %8phC %8phC\n", sp->name, res, fcport->port_name, fcport->node_name); - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_gnnid_event(vha, &ea); sp->free(sp); } @@ -4299,7 +4287,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) return rval; - fcport->disc_state = DSC_GNN_ID; + qla2x00_set_fcport_disc_state(fcport, DSC_GNN_ID); sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); if (!sp) goto done; @@ -4318,9 +4306,7 @@ int qla24xx_async_gnnid(scsi_qla_host_t *vha, fc_port_t *fcport) GNN_ID_RSP_SIZE); /* GNN_ID req */ - ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; - ct_req->req.port_id.port_id[1] = fcport->d_id.b.area; - ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; + ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); /* req & rsp use the same buffer */ @@ -4396,9 +4382,8 @@ void qla24xx_handle_gfpnid_event(scsi_qla_host_t *vha, struct event_arg *ea) qla24xx_post_gpsc_work(vha, fcport); } -static void qla2x00_async_gfpnid_sp_done(void *s, int res) +static void qla2x00_async_gfpnid_sp_done(srb_t *sp, int res) { - struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; fc_port_t *fcport = sp->fcport; u8 *fpn = fcport->ct_desc.ct_sns->p.rsp.rsp.gfpn_id.port_name; @@ -4413,13 +4398,12 @@ static void qla2x00_async_gfpnid_sp_done(void *s, int res) ea.fcport = fcport; ea.sp = sp; ea.rc = res; - ea.event = FCME_GFPNID_DONE; ql_dbg(ql_dbg_disc, vha, 0x204f, "Async done-%s res %x, WWPN %8phC %8phC\n", sp->name, res, fcport->port_name, fcport->fabric_port_name); - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_gfpnid_event(vha, &ea); sp->free(sp); } 
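The conversion helpers referenced throughout these hunks (port_id_to_be_id() on the request side, be_to_port_id() in the GPN_ID completion path) replace the old three-store pattern that wrote domain, area and al_pa into a byte array one element at a time. Their definitions are not part of this diff; the following is a minimal sketch of what they imply, assuming a be_id_t laid out in on-wire (big-endian) byte order and the port_id_t bitfield view suggested by the call sites -- both type layouts here are illustrative, not quoted from the driver headers:

#include <stdint.h>

/* Assumed host-order view of a 24-bit FC N_Port ID (little-endian host). */
typedef union {
	uint32_t b24 : 24;
	struct {
		uint8_t al_pa;
		uint8_t area;
		uint8_t domain;
		uint8_t rsvd_1;
	} b;
} port_id_t;

/* Assumed on-wire CT payload layout: most significant byte first. */
typedef struct {
	uint8_t domain;
	uint8_t area;
	uint8_t al_pa;
} be_id_t;

static inline be_id_t port_id_to_be_id(port_id_t port_id)
{
	be_id_t res;

	res.domain = port_id.b.domain;
	res.area = port_id.b.area;
	res.al_pa = port_id.b.al_pa;

	return res;
}

static inline port_id_t be_to_port_id(be_id_t be_id)
{
	port_id_t res;

	res.b.domain = be_id.domain;
	res.b.area = be_id.area;
	res.b.al_pa = be_id.al_pa;
	res.b.rsvd_1 = 0;

	return res;
}

Each call site becomes a single struct assignment instead of three array stores, which is also why the %3phC debug statements in the GPN_ID path now pass &ct_req->req.port_id.port_id: the field is a three-byte struct rather than a uint8_t[3], so the print helper needs its address.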
@@ -4450,9 +4434,7 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) GFPN_ID_RSP_SIZE); /* GFPN_ID req */ - ct_req->req.port_id.port_id[0] = fcport->d_id.b.domain; - ct_req->req.port_id.port_id[1] = fcport->d_id.b.area; - ct_req->req.port_id.port_id[2] = fcport->d_id.b.al_pa; + ct_req->req.port_id.port_id = port_id_to_be_id(fcport->d_id); /* req & rsp use the same buffer */ @@ -4479,7 +4461,6 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; done: return rval; } diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index da83034d4759..9e6b56527b25 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -17,7 +17,6 @@ #include <asm/prom.h> #endif -#include <target/target_core_base.h> #include "qla_target.h" /* @@ -37,8 +36,8 @@ static struct qla_chip_state_84xx *qla84xx_get_chip(struct scsi_qla_host *); static int qla84xx_init_chip(scsi_qla_host_t *); static int qla25xx_init_queues(struct qla_hw_data *); static int qla24xx_post_prli_work(struct scsi_qla_host*, fc_port_t *); -static void qla24xx_handle_plogi_done_event(struct scsi_qla_host *, - struct event_arg *); +static void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, + struct event_arg *ea); static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, struct event_arg *); static void __qla24xx_handle_gpdb_event(scsi_qla_host_t *, struct event_arg *); @@ -50,23 +49,14 @@ qla2x00_sp_timeout(struct timer_list *t) { srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); struct srb_iocb *iocb; - struct req_que *req; - unsigned long flags; - struct qla_hw_data *ha = sp->vha->hw; - WARN_ON_ONCE(irqs_disabled()); - spin_lock_irqsave(&ha->hardware_lock, flags); - req = sp->qpair->req; - req->outstanding_cmds[sp->handle] = NULL; + WARN_ON(irqs_disabled()); iocb = &sp->u.iocb_cmd; - spin_unlock_irqrestore(&ha->hardware_lock, flags); iocb->timeout(sp); } -void -qla2x00_sp_free(void *ptr) +void qla2x00_sp_free(srb_t *sp) { - srb_t *sp = ptr; struct srb_iocb *iocb = &sp->u.iocb_cmd; del_timer(&iocb->timer); @@ -99,25 +89,53 @@ static void qla24xx_abort_iocb_timeout(void *data) { srb_t *sp = data; struct srb_iocb *abt = &sp->u.iocb_cmd; + struct qla_qpair *qpair = sp->qpair; + u32 handle; + unsigned long flags; + + if (sp->cmd_sp) + ql_dbg(ql_dbg_async, sp->vha, 0x507c, + "Abort timeout - cmd hdl=%x, cmd type=%x hdl=%x, type=%x\n", + sp->cmd_sp->handle, sp->cmd_sp->type, + sp->handle, sp->type); + else + ql_dbg(ql_dbg_async, sp->vha, 0x507c, + "Abort timeout 2 - hdl=%x, type=%x\n", + sp->handle, sp->type); + + spin_lock_irqsave(qpair->qp_lock_ptr, flags); + for (handle = 1; handle < qpair->req->num_outstanding_cmds; handle++) { + if (sp->cmd_sp && (qpair->req->outstanding_cmds[handle] == + sp->cmd_sp)) + qpair->req->outstanding_cmds[handle] = NULL; + + /* removing the abort */ + if (qpair->req->outstanding_cmds[handle] == sp) { + qpair->req->outstanding_cmds[handle] = NULL; + break; + } + } + spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + + if (sp->cmd_sp) + sp->cmd_sp->done(sp->cmd_sp, QLA_OS_TIMER_EXPIRED); abt->u.abt.comp_status = CS_TIMEOUT; - sp->done(sp, QLA_FUNCTION_TIMEOUT); + sp->done(sp, QLA_OS_TIMER_EXPIRED); } -static void qla24xx_abort_sp_done(void *ptr, int res) +static void qla24xx_abort_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; struct srb_iocb *abt = &sp->u.iocb_cmd; - if (del_timer(&sp->u.iocb_cmd.timer)) { - if (sp->flags & SRB_WAKEUP_ON_COMP) - 
complete(&abt->u.abt.comp); - else - sp->free(sp); - } + del_timer(&sp->u.iocb_cmd.timer); + if (sp->flags & SRB_WAKEUP_ON_COMP) + complete(&abt->u.abt.comp); + else + sp->free(sp); } -static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) +int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) { scsi_qla_host_t *vha = cmd_sp->vha; struct srb_iocb *abt_iocb; @@ -127,12 +145,13 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) sp = qla2xxx_get_qpair_sp(cmd_sp->vha, cmd_sp->qpair, cmd_sp->fcport, GFP_ATOMIC); if (!sp) - goto done; + return rval; abt_iocb = &sp->u.iocb_cmd; sp->type = SRB_ABT_CMD; sp->name = "abort"; sp->qpair = cmd_sp->qpair; + sp->cmd_sp = cmd_sp; if (wait) sp->flags = SRB_WAKEUP_ON_COMP; @@ -151,20 +170,18 @@ static int qla24xx_async_abort_cmd(srb_t *cmd_sp, bool wait) cmd_sp->type); rval = qla2x00_start_sp(sp); - if (rval != QLA_SUCCESS) - goto done_free_sp; + if (rval != QLA_SUCCESS) { + sp->free(sp); + return rval; + } if (wait) { wait_for_completion(&abt_iocb->u.abt.comp); rval = abt_iocb->u.abt.comp_status == CS_COMPLETE ? QLA_SUCCESS : QLA_FUNCTION_FAILED; - } else { - goto done; + sp->free(sp); } -done_free_sp: - sp->free(sp); -done: return rval; } @@ -218,6 +235,7 @@ qla2x00_async_iocb_timeout(void *data) case SRB_NACK_PRLI: case SRB_NACK_LOGO: case SRB_CTRL_VP: + default: rc = qla24xx_async_abort_cmd(sp, false); if (rc) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); @@ -237,10 +255,8 @@ qla2x00_async_iocb_timeout(void *data) } } -static void -qla2x00_async_login_sp_done(void *ptr, int res) +static void qla2x00_async_login_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; struct event_arg ea; @@ -252,14 +268,13 @@ qla2x00_async_login_sp_done(void *ptr, int res) if (!test_bit(UNLOADING, &vha->dpc_flags)) { memset(&ea, 0, sizeof(ea)); - ea.event = FCME_PLOGI_DONE; ea.fcport = sp->fcport; ea.data[0] = lio->u.logio.data[0]; ea.data[1] = lio->u.logio.data[1]; ea.iop[0] = lio->u.logio.iop[0]; ea.iop[1] = lio->u.logio.iop[1]; ea.sp = sp; - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_plogi_done_event(vha, &ea); } sp->free(sp); @@ -289,17 +304,22 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (!vha->flags.online) - goto done; + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || + fcport->loop_id == FC_NO_LOOP_ID) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC - not sending command.\n", + __func__, fcport->port_name); + return rval; + } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); fcport->flags |= FCF_ASYNC_SENT; fcport->logout_completed = 0; - fcport->disc_state = DSC_LOGIN_PEND; sp->type = SRB_LOGIN_CMD; sp->name = "login"; sp->gen1 = fcport->rscn_gen; @@ -315,7 +335,7 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport, else lio->u.logio.flags |= SRB_LOGIN_COND_PLOGI; - if (fcport->fc4f_nvme) + if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI; ql_dbg(ql_dbg_disc, vha, 0x2072, @@ -341,11 +361,8 @@ done: return rval; } -static void -qla2x00_async_logout_sp_done(void *ptr, int res) +static void qla2x00_async_logout_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; - sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); sp->fcport->login_gen++; qlt_logo_completion_handler(sp->fcport, res); @@ -359,9 +376,6 @@ qla2x00_async_logout(struct 
scsi_qla_host *vha, fc_port_t *fcport) struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) - return rval; - fcport->flags |= FCF_ASYNC_SENT; sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -401,14 +415,12 @@ qla2x00_async_prlo_done(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->flags &= ~FCF_ASYNC_ACTIVE; /* Don't re-login in target mode */ if (!fcport->tgt_session) - qla2x00_mark_device_lost(vha, fcport, 1, 0); + qla2x00_mark_device_lost(vha, fcport, 1); qlt_logo_completion_handler(fcport, data[0]); } -static void -qla2x00_async_prlo_sp_done(void *s, int res) +static void qla2x00_async_prlo_sp_done(srb_t *sp, int res) { - srb_t *sp = (srb_t *)s; struct srb_iocb *lio = &sp->u.iocb_cmd; struct scsi_qla_host *vha = sp->vha; @@ -469,6 +481,9 @@ void qla24xx_handle_adisc_event(scsi_qla_host_t *vha, struct event_arg *ea) fcport->fw_login_state, ea->rc, fcport->login_gen, ea->sp->gen2, fcport->rscn_gen, ea->sp->gen1, fcport->loop_id); + WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", + ea->data[0]); + if (ea->data[0] != MBS_COMMAND_COMPLETE) { ql_dbg(ql_dbg_disc, vha, 0x2066, "%s %8phC: adisc fail: post delete\n", @@ -508,13 +523,12 @@ static int qla_post_els_plogi_work(struct scsi_qla_host *vha, fc_port_t *fcport) e->u.fcport.fcport = fcport; fcport->flags |= FCF_ASYNC_ACTIVE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); return qla2x00_post_work(vha, e); } -static void -qla2x00_async_adisc_sp_done(void *ptr, int res) +static void qla2x00_async_adisc_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; struct scsi_qla_host *vha = sp->vha; struct event_arg ea; struct srb_iocb *lio = &sp->u.iocb_cmd; @@ -526,7 +540,6 @@ qla2x00_async_adisc_sp_done(void *ptr, int res) sp->fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); memset(&ea, 0, sizeof(ea)); - ea.event = FCME_ADISC_DONE; ea.rc = res; ea.data[0] = lio->u.logio.data[0]; ea.data[1] = lio->u.logio.data[1]; @@ -535,7 +548,7 @@ qla2x00_async_adisc_sp_done(void *ptr, int res) ea.fcport = sp->fcport; ea.sp = sp; - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_adisc_event(vha, &ea); sp->free(sp); } @@ -662,7 +675,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, port_id_t id; u64 wwn; u16 data[2]; - u8 current_login_state; + u8 current_login_state, nvme_cls; fcport = ea->fcport; ql_dbg(ql_dbg_disc, vha, 0xffff, @@ -721,19 +734,24 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, loop_id = le16_to_cpu(e->nport_handle); loop_id = (loop_id & 0x7fff); - if (fcport->fc4f_nvme) - current_login_state = e->current_login_state >> 4; - else - current_login_state = e->current_login_state & 0xf; + nvme_cls = e->current_login_state >> 4; + current_login_state = e->current_login_state & 0xf; + if (PRLI_PHASE(nvme_cls)) { + current_login_state = nvme_cls; + fcport->fc4_type &= ~FS_FC4TYPE_FCP; + fcport->fc4_type |= FS_FC4TYPE_NVME; + } else if (PRLI_PHASE(current_login_state)) { + fcport->fc4_type |= FS_FC4TYPE_FCP; + fcport->fc4_type &= ~FS_FC4TYPE_NVME; + } ql_dbg(ql_dbg_disc, vha, 0x20e2, - "%s found %8phC CLS [%x|%x] nvme %d ID[%02x%02x%02x|%02x%02x%02x] lid[%d|%d]\n", + "%s found %8phC CLS [%x|%x] fc4_type %d ID[%06x|%06x] lid[%d|%d]\n", __func__, fcport->port_name, e->current_login_state, fcport->fw_login_state, - fcport->fc4f_nvme, id.b.domain, id.b.area, id.b.al_pa, - fcport->d_id.b.domain, fcport->d_id.b.area, - fcport->d_id.b.al_pa, loop_id, fcport->loop_id); + fcport->fc4_type, id.b24, fcport->d_id.b24, 
+ loop_id, fcport->loop_id); switch (fcport->disc_state) { case DSC_DELETE_PEND: @@ -741,12 +759,15 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, break; default: if ((id.b24 != fcport->d_id.b24 && - fcport->d_id.b24) || + fcport->d_id.b24 && + fcport->loop_id != FC_NO_LOOP_ID) || (fcport->loop_id != FC_NO_LOOP_ID && fcport->loop_id != loop_id)) { ql_dbg(ql_dbg_disc, vha, 0x20e3, "%s %d %8phC post del sess\n", __func__, __LINE__, fcport->port_name); + if (fcport->n2n_flag) + fcport->d_id.b24 = 0; qlt_schedule_sess_for_deletion(fcport); return; } @@ -754,6 +775,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, } fcport->loop_id = loop_id; + if (fcport->n2n_flag) + fcport->d_id.b24 = id.b24; wwn = wwn_to_u64(fcport->port_name); qlt_find_sess_invalidate_other(vha, wwn, @@ -803,6 +826,16 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, fcport->fw_login_state = current_login_state; fcport->d_id = id; switch (current_login_state) { + case DSC_LS_PRLI_PEND: + /* + * In the middle of PRLI. Let it finish. + * Allow relogin code to recheck state again + * with GNL. Push disc_state back to DELETED + * so GNL can go out again + */ + qla2x00_set_fcport_disc_state(fcport, + DSC_DELETED); + break; case DSC_LS_PRLI_COMP: if ((e->prli_svc_param_word_3[0] & BIT_4) == 0) fcport->port_type = FCT_INITIATOR; @@ -877,7 +910,7 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, qla24xx_fcport_handle_login(vha, fcport); break; case ISP_CFG_N: - fcport->disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(fcport, DSC_DELETED); if (time_after_eq(jiffies, fcport->dm_login_expire)) { if (fcport->n2n_link_reset_cnt < 2) { fcport->n2n_link_reset_cnt++; @@ -917,10 +950,8 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha, } } /* gnl_event */ -static void -qla24xx_async_gnl_sp_done(void *s, int res) +static void qla24xx_async_gnl_sp_done(srb_t *sp, int res) { - struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; unsigned long flags; struct fc_port *fcport = NULL, *tf; @@ -943,7 +974,6 @@ qla24xx_async_gnl_sp_done(void *s, int res) memset(&ea, 0, sizeof(ea)); ea.sp = sp; ea.rc = res; - ea.event = FCME_GNL_DONE; if (sp->u.iocb_cmd.u.mbx.in_mb[1] >= sizeof(struct get_name_list_extended)) { @@ -960,8 +990,8 @@ qla24xx_async_gnl_sp_done(void *s, int res) set_bit(loop_id, vha->hw->loop_id_map); wwn = wwn_to_u64(e->port_name); - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20e8, - "%s %8phC %02x:%02x:%02x state %d/%d lid %x \n", + ql_dbg(ql_dbg_disc, vha, 0x20e8, + "%s %8phC %02x:%02x:%02x CLS %x/%x lid %x \n", __func__, (void *)&wwn, e->port_id[2], e->port_id[1], e->port_id[0], e->current_login_state, e->last_login_state, (loop_id & 0x7fff)); @@ -982,7 +1012,7 @@ qla24xx_async_gnl_sp_done(void *s, int res) spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); ea.fcport = fcport; - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_gnl_done_event(vha, &ea); } /* create new fcport if fw has knowledge of new sessions */ @@ -1019,6 +1049,16 @@ qla24xx_async_gnl_sp_done(void *s, int res) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); vha->gnl.sent = 0; + if (!list_empty(&vha->gnl.fcports)) { + /* retrigger gnl */ + list_for_each_entry_safe(fcport, tf, &vha->gnl.fcports, + gnl_entry) { + list_del_init(&fcport->gnl_entry); + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + if (qla24xx_post_gnl_work(vha, fcport) == QLA_SUCCESS) + break; + } + } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); sp->free(sp); @@ 
-1040,7 +1080,7 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); fcport->flags |= FCF_ASYNC_SENT; - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; @@ -1089,8 +1129,8 @@ int qla24xx_async_gnl(struct scsi_qla_host *vha, fc_port_t *fcport) done_free_sp: sp->free(sp); - fcport->flags &= ~FCF_ASYNC_SENT; done: + fcport->flags &= ~(FCF_ASYNC_ACTIVE | FCF_ASYNC_SENT); return rval; } @@ -1107,10 +1147,8 @@ int qla24xx_post_gnl_work(struct scsi_qla_host *vha, fc_port_t *fcport) return qla2x00_post_work(vha, e); } -static -void qla24xx_async_gpdb_sp_done(void *s, int res) +static void qla24xx_async_gpdb_sp_done(srb_t *sp, int res) { - struct srb *sp = s; struct scsi_qla_host *vha = sp->vha; struct qla_hw_data *ha = vha->hw; fc_port_t *fcport = sp->fcport; @@ -1121,20 +1159,18 @@ void qla24xx_async_gpdb_sp_done(void *s, int res) "Async done-%s res %x, WWPN %8phC mb[1]=%x mb[2]=%x \n", sp->name, res, fcport->port_name, mb[1], mb[2]); - if (res == QLA_FUNCTION_TIMEOUT) { - dma_pool_free(sp->vha->hw->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, - sp->u.iocb_cmd.u.mbx.in_dma); - return; - } - fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + + if (res == QLA_FUNCTION_TIMEOUT) + goto done; + memset(&ea, 0, sizeof(ea)); - ea.event = FCME_GPDB_DONE; ea.fcport = fcport; ea.sp = sp; - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_gpdb_event(vha, &ea); +done: dma_pool_free(ha->s_dma_pool, sp->u.iocb_cmd.u.mbx.in, sp->u.iocb_cmd.u.mbx.in_dma); @@ -1154,10 +1190,8 @@ static int qla24xx_post_prli_work(struct scsi_qla_host *vha, fc_port_t *fcport) return qla2x00_post_work(vha, e); } -static void -qla2x00_async_prli_sp_done(void *ptr, int res) +static void qla2x00_async_prli_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; struct event_arg ea; @@ -1170,7 +1204,6 @@ qla2x00_async_prli_sp_done(void *ptr, int res) if (!test_bit(UNLOADING, &vha->dpc_flags)) { memset(&ea, 0, sizeof(ea)); - ea.event = FCME_PRLI_DONE; ea.fcport = sp->fcport; ea.data[0] = lio->u.logio.data[0]; ea.data[1] = lio->u.logio.data[1]; @@ -1178,7 +1211,7 @@ qla2x00_async_prli_sp_done(void *ptr, int res) ea.iop[1] = lio->u.logio.iop[1]; ea.sp = sp; - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_prli_done_event(vha, &ea); } sp->free(sp); @@ -1191,12 +1224,19 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) struct srb_iocb *lio; int rval = QLA_FUNCTION_FAILED; - if (!vha->flags.online) + if (!vha->flags.online) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", + __func__, __LINE__, fcport->port_name); return rval; + } - if (fcport->fw_login_state == DSC_LS_PLOGI_PEND || - fcport->fw_login_state == DSC_LS_PRLI_PEND) + if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND || + fcport->fw_login_state == DSC_LS_PRLI_PEND) && + qla_dual_mode_enabled(vha)) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %d %8phC exit\n", + __func__, __LINE__, fcport->port_name); return rval; + } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) @@ -1215,13 +1255,13 @@ qla24xx_async_prli(struct scsi_qla_host *vha, fc_port_t *fcport) sp->done = qla2x00_async_prli_sp_done; lio->u.logio.flags = 0; - if (fcport->fc4f_nvme) + if (NVME_TARGET(vha->hw, fcport)) lio->u.logio.flags |= SRB_LOGIN_NVME_PRLI; ql_dbg(ql_dbg_disc, vha, 0x211b, "Async-prli - %8phC hdl=%x, loopid=%x 
portid=%06x retries=%d %s.\n", fcport->port_name, sp->handle, fcport->loop_id, fcport->d_id.b24, - fcport->login_retry, fcport->fc4f_nvme ? "nvme" : "fc"); + fcport->login_retry, NVME_TARGET(vha->hw, fcport) ? "nvme" : "fc"); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -1262,15 +1302,20 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt) struct port_database_24xx *pd; struct qla_hw_data *ha = vha->hw; - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT)) + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) || + fcport->loop_id == FC_NO_LOOP_ID) { + ql_log(ql_log_warn, vha, 0xffff, + "%s: %8phC - not sending command.\n", + __func__, fcport->port_name); return rval; - - fcport->disc_state = DSC_GPDB; + } sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); if (!sp) goto done; + qla2x00_set_fcport_disc_state(fcport, DSC_GPDB); + fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_MB_IOCB; sp->name = "gpdb"; @@ -1319,6 +1364,7 @@ done_free_sp: sp->free(sp); fcport->flags &= ~FCF_ASYNC_SENT; done: + fcport->flags &= ~FCF_ASYNC_ACTIVE; qla24xx_post_gpdb_work(vha, fcport, opt); return rval; } @@ -1349,7 +1395,7 @@ void __qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) ql_dbg(ql_dbg_disc, vha, 0x20d6, "%s %d %8phC session revalidate success\n", __func__, __LINE__, ea->fcport->port_name); - ea->fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_COMPLETE); } spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); } @@ -1367,14 +1413,14 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) fcport->flags &= ~FCF_ASYNC_SENT; ql_dbg(ql_dbg_disc, vha, 0x20d2, - "%s %8phC DS %d LS %d nvme %x rc %d\n", __func__, fcport->port_name, - fcport->disc_state, pd->current_login_state, fcport->fc4f_nvme, - ea->rc); + "%s %8phC DS %d LS %d fc4_type %x rc %d\n", __func__, + fcport->port_name, fcport->disc_state, pd->current_login_state, + fcport->fc4_type, ea->rc); if (fcport->disc_state == DSC_DELETE_PEND) return; - if (fcport->fc4f_nvme) + if (NVME_TARGET(vha->hw, fcport)) ls = pd->current_login_state >> 4; else ls = pd->current_login_state & 0xf; @@ -1403,7 +1449,7 @@ void qla24xx_handle_gpdb_event(scsi_qla_host_t *vha, struct event_arg *ea) /* Set discovery state back to GNL to Relogin attempt */ if (qla_dual_mode_enabled(vha) || qla_ini_mode_enabled(vha)) { - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); set_bit(RELOGIN_NEEDED, &vha->dpc_flags); } return; @@ -1473,7 +1519,7 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) u64 wwn; u16 sec; - ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x20d8, + ql_dbg(ql_dbg_disc, vha, 0x20d8, "%s %8phC DS %d LS %d P %d fl %x confl %p rscn %d|%d login %d lid %d scan %d\n", __func__, fcport->port_name, fcport->disc_state, fcport->fw_login_state, fcport->login_pause, fcport->flags, @@ -1484,19 +1530,21 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) return 0; if ((fcport->loop_id != FC_NO_LOOP_ID) && + qla_dual_mode_enabled(vha) && ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || (fcport->fw_login_state == DSC_LS_PRLI_PEND))) return 0; - if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { + if (fcport->fw_login_state == DSC_LS_PLOGI_COMP && + !N2N_TOPO(vha->hw)) { if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) { set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return 0; } } - /* for pure Target Mode. 
Login will not be initiated */ - if (vha->host->active_mode == MODE_TARGET) + /* Target won't initiate port login if fabric is present */ + if (vha->host->active_mode == MODE_TARGET && !N2N_TOPO(vha->hw)) return 0; if (fcport->flags & FCF_ASYNC_SENT) { @@ -1559,13 +1607,19 @@ int qla24xx_fcport_handle_login(struct scsi_qla_host *vha, fc_port_t *fcport) qla24xx_post_gpdb_work(vha, fcport, 0); } else { ql_dbg(ql_dbg_disc, vha, 0x2118, - "%s %d %8phC post NVMe PRLI\n", - __func__, __LINE__, fcport->port_name); + "%s %d %8phC post %s PRLI\n", + __func__, __LINE__, fcport->port_name, + NVME_TARGET(vha->hw, fcport) ? "NVME" : + "FC"); qla24xx_post_prli_work(vha, fcport); } break; default: if (fcport->login_pause) { + ql_dbg(ql_dbg_disc, vha, 0x20d8, + "%s %d %8phC exit\n", + __func__, __LINE__, + fcport->port_name); fcport->last_rscn_gen = fcport->rscn_gen; fcport->last_login_gen = fcport->login_gen; set_bit(RELOGIN_NEEDED, &vha->dpc_flags); @@ -1636,12 +1690,34 @@ int qla24xx_post_newsess_work(struct scsi_qla_host *vha, port_id_t *id, return qla2x00_post_work(vha, e); } -static +void qla2x00_handle_rscn(scsi_qla_host_t *vha, struct event_arg *ea) +{ + fc_port_t *fcport; + unsigned long flags; + + fcport = qla2x00_find_fcport_by_nportid(vha, &ea->id, 1); + if (fcport) { + fcport->scan_needed = 1; + fcport->rscn_gen++; + } + + spin_lock_irqsave(&vha->work_lock, flags); + if (vha->scan.scan_flags == 0) { + ql_dbg(ql_dbg_disc, vha, 0xffff, "%s: schedule\n", __func__); + vha->scan.scan_flags |= SF_QUEUED; + schedule_delayed_work(&vha->scan.scan_work, 5); + } + spin_unlock_irqrestore(&vha->work_lock, flags); +} + void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, struct event_arg *ea) { fc_port_t *fcport = ea->fcport; + if (test_bit(UNLOADING, &vha->dpc_flags)) + return; + ql_dbg(ql_dbg_disc, vha, 0x2102, "%s %8phC DS %d LS %d P %d del %d cnfl %p rscn %d|%d login %d|%d fl %x\n", __func__, fcport->port_name, fcport->disc_state, @@ -1651,110 +1727,29 @@ void qla24xx_handle_relogin_event(scsi_qla_host_t *vha, fcport->last_login_gen, fcport->login_gen, fcport->flags); - if ((fcport->fw_login_state == DSC_LS_PLOGI_PEND) || - (fcport->fw_login_state == DSC_LS_PRLI_PEND)) - return; - - if (fcport->fw_login_state == DSC_LS_PLOGI_COMP) { - if (time_before_eq(jiffies, fcport->plogi_nack_done_deadline)) { - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - return; - } - } - if (fcport->last_rscn_gen != fcport->rscn_gen) { - ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gidpn\n", + ql_dbg(ql_dbg_disc, vha, 0x20e9, "%s %d %8phC post gnl\n", __func__, __LINE__, fcport->port_name); - + qla24xx_post_gnl_work(vha, fcport); return; } qla24xx_fcport_handle_login(vha, fcport); } - -static void qla_handle_els_plogi_done(scsi_qla_host_t *vha, +void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea) { + /* for pure Target Mode, PRLI will not be initiated */ + if (vha->host->active_mode == MODE_TARGET) + return; + ql_dbg(ql_dbg_disc, vha, 0x2118, "%s %d %8phC post PRLI\n", __func__, __LINE__, ea->fcport->port_name); qla24xx_post_prli_work(vha, ea->fcport); } -void qla2x00_fcport_event_handler(scsi_qla_host_t *vha, struct event_arg *ea) -{ - fc_port_t *fcport; - - switch (ea->event) { - case FCME_RELOGIN: - if (test_bit(UNLOADING, &vha->dpc_flags)) - return; - - qla24xx_handle_relogin_event(vha, ea); - break; - case FCME_RSCN: - if (test_bit(UNLOADING, &vha->dpc_flags)) - return; - { - unsigned long flags; - - fcport = qla2x00_find_fcport_by_nportid - (vha, &ea->id, 1); - if (fcport) { - 
fcport->scan_needed = 1; - fcport->rscn_gen++; - } - - spin_lock_irqsave(&vha->work_lock, flags); - if (vha->scan.scan_flags == 0) { - ql_dbg(ql_dbg_disc, vha, 0xffff, - "%s: schedule\n", __func__); - vha->scan.scan_flags |= SF_QUEUED; - schedule_delayed_work(&vha->scan.scan_work, 5); - } - spin_unlock_irqrestore(&vha->work_lock, flags); - } - break; - case FCME_GNL_DONE: - qla24xx_handle_gnl_done_event(vha, ea); - break; - case FCME_GPSC_DONE: - qla24xx_handle_gpsc_event(vha, ea); - break; - case FCME_PLOGI_DONE: /* Initiator side sent LLIOCB */ - qla24xx_handle_plogi_done_event(vha, ea); - break; - case FCME_PRLI_DONE: - qla24xx_handle_prli_done_event(vha, ea); - break; - case FCME_GPDB_DONE: - qla24xx_handle_gpdb_event(vha, ea); - break; - case FCME_GPNID_DONE: - qla24xx_handle_gpnid_event(vha, ea); - break; - case FCME_GFFID_DONE: - qla24xx_handle_gffid_event(vha, ea); - break; - case FCME_ADISC_DONE: - qla24xx_handle_adisc_event(vha, ea); - break; - case FCME_GNNID_DONE: - qla24xx_handle_gnnid_event(vha, ea); - break; - case FCME_GFPNID_DONE: - qla24xx_handle_gfpnid_event(vha, ea); - break; - case FCME_ELS_PLOGI_DONE: - qla_handle_els_plogi_done(vha, ea); - break; - default: - BUG_ON(1); - break; - } -} - /* * RSCN(s) came in for this fcport, but the RSCN(s) was not able * to be consumed by the fcport @@ -1772,10 +1767,9 @@ void qla_rscn_replay(fc_port_t *fcport) if (fcport->scan_needed) { memset(&ea, 0, sizeof(ea)); - ea.event = FCME_RSCN; ea.id = fcport->d_id; ea.id.b.rsvd_1 = RSCN_PORT_ADDR; - qla2x00_fcport_event_handler(fcport->vha, &ea); + qla2x00_handle_rscn(fcport->vha, &ea); } } @@ -1784,15 +1778,27 @@ qla2x00_tmf_iocb_timeout(void *data) { srb_t *sp = data; struct srb_iocb *tmf = &sp->u.iocb_cmd; + int rc, h; + unsigned long flags; - tmf->u.tmf.comp_status = CS_TIMEOUT; - complete(&tmf->u.tmf.comp); + rc = qla24xx_async_abort_cmd(sp, false); + if (rc) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + tmf->u.tmf.comp_status = CS_TIMEOUT; + tmf->u.tmf.data = QLA_FUNCTION_FAILED; + complete(&tmf->u.tmf.comp); + } } -static void -qla2x00_tmf_sp_done(void *ptr, int res) +static void qla2x00_tmf_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; struct srb_iocb *tmf = &sp->u.iocb_cmd; complete(&tmf->u.tmf.comp); @@ -1890,6 +1896,9 @@ qla24xx_async_abort_command(srb_t *sp) static void qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { + WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", + ea->data[0]); + switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: ql_dbg(ql_dbg_disc, vha, 0x2118, @@ -1914,22 +1923,31 @@ qla24xx_handle_prli_done_event(struct scsi_qla_host *vha, struct event_arg *ea) break; } - if (ea->fcport->n2n_flag) { + /* + * Retry PRLI with other FC-4 type if failure occurred on dual + * FCP/NVMe port + */ + if (NVME_FCP_TARGET(ea->fcport)) { ql_dbg(ql_dbg_disc, vha, 0x2118, - "%s %d %8phC post fc4 prli\n", - __func__, __LINE__, ea->fcport->port_name); - ea->fcport->fc4f_nvme = 0; - ea->fcport->n2n_flag = 0; - qla24xx_post_prli_work(vha, ea->fcport); + "%s %d %8phC post %s prli\n", + __func__, __LINE__, ea->fcport->port_name, + (ea->fcport->fc4_type & FS_FC4TYPE_NVME) ? 
+ "NVMe" : "FCP"); + if (vha->hw->fc4_type_priority == FC4_PRIORITY_NVME) + ea->fcport->fc4_type &= ~FS_FC4TYPE_NVME; + else + ea->fcport->fc4_type &= ~FS_FC4TYPE_FCP; } - ql_dbg(ql_dbg_disc, vha, 0x2119, - "%s %d %8phC unhandle event of %x\n", - __func__, __LINE__, ea->fcport->port_name, ea->data[0]); + + ea->fcport->flags &= ~FCF_ASYNC_SENT; + ea->fcport->keep_nport_handle = 0; + ea->fcport->logout_on_delete = 1; + qlt_schedule_sess_for_deletion(ea->fcport); break; } } -static void +void qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) { port_id_t cid; /* conflict Nport id */ @@ -1953,8 +1971,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) return; } - if (fcport->disc_state == DSC_DELETE_PEND) + if ((fcport->disc_state == DSC_DELETE_PEND) || + (fcport->disc_state == DSC_DELETED)) { + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); return; + } if (ea->sp->gen2 != fcport->login_gen) { /* target side must have changed it. */ @@ -1972,6 +1993,9 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) return; } + WARN_ONCE(!qla2xxx_is_valid_mbs(ea->data[0]), "mbs: %#x\n", + ea->data[0]); + switch (ea->data[0]) { case MBS_COMMAND_COMPLETE: /* @@ -1979,14 +2003,14 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) * force a relogin attempt via implicit LOGO, PLOGI, and PRLI * requests. */ - if (ea->fcport->fc4f_nvme) { + if (NVME_TARGET(vha->hw, ea->fcport)) { ql_dbg(ql_dbg_disc, vha, 0x2117, "%s %d %8phC post prli\n", __func__, __LINE__, ea->fcport->port_name); qla24xx_post_prli_work(vha, ea->fcport); } else { ql_dbg(ql_dbg_disc, vha, 0x20ea, - "%s %d %8phC LoopID 0x%x in use with %06x. post gnl\n", + "%s %d %8phC LoopID 0x%x in use with %06x. post gpdb\n", __func__, __LINE__, ea->fcport->port_name, ea->fcport->loop_id, ea->fcport->d_id.b24); @@ -2006,11 +2030,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) __func__, __LINE__, ea->fcport->port_name, ea->data[1]); ea->fcport->flags &= ~FCF_ASYNC_SENT; - ea->fcport->disc_state = DSC_LOGIN_FAILED; + qla2x00_set_fcport_disc_state(ea->fcport, DSC_LOGIN_FAILED); if (ea->data[1] & QLA_LOGIO_LOGIN_RETRIED) set_bit(RELOGIN_NEEDED, &vha->dpc_flags); else - qla2x00_mark_device_lost(vha, ea->fcport, 1, 0); + qla2x00_mark_device_lost(vha, ea->fcport, 1); break; case MBS_LOOP_ID_USED: /* data[1] = IO PARAM 1 = nport ID */ @@ -2057,6 +2081,7 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) set_bit(lid, vha->hw->loop_id_map); ea->fcport->loop_id = lid; ea->fcport->keep_nport_handle = 0; + ea->fcport->logout_on_delete = 1; qlt_schedule_sess_for_deletion(ea->fcport); } break; @@ -2064,16 +2089,6 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea) return; } -void -qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, - uint16_t *data) -{ - qlt_logo_completion_handler(fcport, data[0]); - fcport->login_gen++; - fcport->flags &= ~FCF_ASYNC_ACTIVE; - return; -} - /****************************************************************************/ /* QLogic ISP2x00 Hardware Support Functions. 
*/ /****************************************************************************/ @@ -2233,8 +2248,18 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) ql_dbg(ql_dbg_init, vha, 0x0061, "Configure NVRAM parameters...\n"); + /* Let priority default to FCP, can be overridden by nvram_config */ + ha->fc4_type_priority = FC4_PRIORITY_FCP; + ha->isp_ops->nvram_config(vha); + if (ha->fc4_type_priority != FC4_PRIORITY_FCP && + ha->fc4_type_priority != FC4_PRIORITY_NVME) + ha->fc4_type_priority = FC4_PRIORITY_FCP; + + ql_log(ql_log_info, vha, 0xffff, "FC4 priority set to %s\n", + ha->fc4_type_priority == FC4_PRIORITY_FCP ? "FCP" : "NVMe"); + if (ha->flags.disable_serdes) { /* Mask HBA via NVRAM settings? */ ql_log(ql_log_info, vha, 0x0077, @@ -2266,6 +2291,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha) if (qla_ini_mode_enabled(vha) || qla_dual_mode_enabled(vha)) rval = qla2x00_init_rings(vha); + /* No point in continuing if firmware initialization failed. */ + if (rval != QLA_SUCCESS) + return rval; + ha->flags.chip_reset_done = 1; if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { @@ -3082,103 +3111,113 @@ qla24xx_chip_diag(scsi_qla_host_t *vha) } static void -qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) +qla2x00_init_fce_trace(scsi_qla_host_t *vha) { int rval; dma_addr_t tc_dma; void *tc; struct qla_hw_data *ha = vha->hw; - if (ha->eft) { + if (!IS_FWI2_CAPABLE(ha)) + return; + + if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && + !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + return; + + if (ha->fce) { ql_dbg(ql_dbg_init, vha, 0x00bd, - "%s: Offload Mem is already allocated.\n", - __func__); + "%s: FCE Mem is already allocated.\n", + __func__); return; } - if (IS_FWI2_CAPABLE(ha)) { - /* Allocate memory for Fibre Channel Event Buffer. */ - if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && - !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) - goto try_eft; + /* Allocate memory for Fibre Channel Event Buffer. */ + tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, + GFP_KERNEL); + if (!tc) { + ql_log(ql_log_warn, vha, 0x00be, + "Unable to allocate (%d KB) for FCE.\n", + FCE_SIZE / 1024); + return; + } - if (ha->fce) - dma_free_coherent(&ha->pdev->dev, - FCE_SIZE, ha->fce, ha->fce_dma); + rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, + ha->fce_mb, &ha->fce_bufs); + if (rval) { + ql_log(ql_log_warn, vha, 0x00bf, + "Unable to initialize FCE (%d).\n", rval); + dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, tc_dma); + return; + } - /* Allocate memory for Fibre Channel Event Buffer. */ - tc = dma_alloc_coherent(&ha->pdev->dev, FCE_SIZE, &tc_dma, - GFP_KERNEL); - if (!tc) { - ql_log(ql_log_warn, vha, 0x00be, - "Unable to allocate (%d KB) for FCE.\n", - FCE_SIZE / 1024); - goto try_eft; - } - - rval = qla2x00_enable_fce_trace(vha, tc_dma, FCE_NUM_BUFFERS, - ha->fce_mb, &ha->fce_bufs); - if (rval) { - ql_log(ql_log_warn, vha, 0x00bf, - "Unable to initialize FCE (%d).\n", rval); - dma_free_coherent(&ha->pdev->dev, FCE_SIZE, tc, - tc_dma); - ha->flags.fce_enabled = 0; - goto try_eft; - } - ql_dbg(ql_dbg_init, vha, 0x00c0, - "Allocate (%d KB) for FCE...\n", FCE_SIZE / 1024); - - ha->flags.fce_enabled = 1; - ha->fce_dma = tc_dma; - ha->fce = tc; - -try_eft: - if (ha->eft) - dma_free_coherent(&ha->pdev->dev, - EFT_SIZE, ha->eft, ha->eft_dma); + ql_dbg(ql_dbg_init, vha, 0x00c0, + "Allocated (%d KB) for FCE...\n", FCE_SIZE / 1024); - /* Allocate memory for Extended Trace Buffer. 
*/ - tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, - GFP_KERNEL); - if (!tc) { - ql_log(ql_log_warn, vha, 0x00c1, - "Unable to allocate (%d KB) for EFT.\n", - EFT_SIZE / 1024); - goto eft_err; - } + ha->flags.fce_enabled = 1; + ha->fce_dma = tc_dma; + ha->fce = tc; +} - rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); - if (rval) { - ql_log(ql_log_warn, vha, 0x00c2, - "Unable to initialize EFT (%d).\n", rval); - dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, - tc_dma); - goto eft_err; - } - ql_dbg(ql_dbg_init, vha, 0x00c3, - "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); +static void +qla2x00_init_eft_trace(scsi_qla_host_t *vha) +{ + int rval; + dma_addr_t tc_dma; + void *tc; + struct qla_hw_data *ha = vha->hw; - ha->eft_dma = tc_dma; - ha->eft = tc; + if (!IS_FWI2_CAPABLE(ha)) + return; + + if (ha->eft) { + ql_dbg(ql_dbg_init, vha, 0x00bd, + "%s: EFT Mem is already allocated.\n", + __func__); + return; } -eft_err: - return; + /* Allocate memory for Extended Trace Buffer. */ + tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, + GFP_KERNEL); + if (!tc) { + ql_log(ql_log_warn, vha, 0x00c1, + "Unable to allocate (%d KB) for EFT.\n", + EFT_SIZE / 1024); + return; + } + + rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); + if (rval) { + ql_log(ql_log_warn, vha, 0x00c2, + "Unable to initialize EFT (%d).\n", rval); + dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, tc_dma); + return; + } + + ql_dbg(ql_dbg_init, vha, 0x00c3, + "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); + + ha->eft_dma = tc_dma; + ha->eft = tc; +} + +static void +qla2x00_alloc_offload_mem(scsi_qla_host_t *vha) +{ + qla2x00_init_fce_trace(vha); + qla2x00_init_eft_trace(vha); } void qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) { - int rval; uint32_t dump_size, fixed_size, mem_size, req_q_size, rsp_q_size, eft_size, fce_size, mq_size; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; struct qla2xxx_fw_dump *fw_dump; - dma_addr_t tc_dma; - void *tc; dump_size = fixed_size = mem_size = eft_size = fce_size = mq_size = 0; req_q_size = rsp_q_size = 0; @@ -3216,37 +3255,13 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha) } if (ha->tgt.atio_ring) mq_size += ha->tgt.atio_q_length * sizeof(request_t); - /* Allocate memory for Fibre Channel Event Buffer. */ - if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha) && - !IS_QLA27XX(ha) && !IS_QLA28XX(ha)) - goto try_eft; - fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; -try_eft: + qla2x00_init_fce_trace(vha); + if (ha->fce) + fce_size = sizeof(struct qla2xxx_fce_chain) + FCE_SIZE; + qla2x00_init_eft_trace(vha); if (ha->eft) - dma_free_coherent(&ha->pdev->dev, - EFT_SIZE, ha->eft, ha->eft_dma); - - /* Allocate memory for Extended Trace Buffer. 
*/ - tc = dma_alloc_coherent(&ha->pdev->dev, EFT_SIZE, &tc_dma, - GFP_KERNEL); - if (!tc) { - ql_log(ql_log_warn, vha, 0x00c1, - "Unable to allocate (%d KB) for EFT.\n", - EFT_SIZE / 1024); - goto allocate; - } - - rval = qla2x00_enable_eft_trace(vha, tc_dma, EFT_NUM_BUFFERS); - if (rval) { - ql_log(ql_log_warn, vha, 0x00c2, - "Unable to initialize EFT (%d).\n", rval); - dma_free_coherent(&ha->pdev->dev, EFT_SIZE, tc, - tc_dma); - } - ql_dbg(ql_dbg_init, vha, 0x00c3, - "Allocated (%d KB) EFT ...\n", EFT_SIZE / 1024); - eft_size = EFT_SIZE; + eft_size = EFT_SIZE; } if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { @@ -3255,7 +3270,7 @@ try_eft: for (j = 0; j < 2; j++, fwdt++) { if (!fwdt->template) { - ql_log(ql_log_warn, vha, 0x00ba, + ql_dbg(ql_dbg_init, vha, 0x00ba, "-> fwdt%u no template\n", j); continue; } @@ -3268,24 +3283,22 @@ try_eft: j, fwdt->dump_size); dump_size += fwdt->dump_size; } - goto allocate; + } else { + req_q_size = req->length * sizeof(request_t); + rsp_q_size = rsp->length * sizeof(response_t); + dump_size = offsetof(struct qla2xxx_fw_dump, isp); + dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + + eft_size; + ha->chain_offset = dump_size; + dump_size += mq_size + fce_size; + if (ha->exchoffld_buf) + dump_size += sizeof(struct qla2xxx_offld_chain) + + ha->exchoffld_size; + if (ha->exlogin_buf) + dump_size += sizeof(struct qla2xxx_offld_chain) + + ha->exlogin_size; } - req_q_size = req->length * sizeof(request_t); - rsp_q_size = rsp->length * sizeof(response_t); - dump_size = offsetof(struct qla2xxx_fw_dump, isp); - dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size; - ha->chain_offset = dump_size; - dump_size += mq_size + fce_size; - - if (ha->exchoffld_buf) - dump_size += sizeof(struct qla2xxx_offld_chain) + - ha->exchoffld_size; - if (ha->exlogin_buf) - dump_size += sizeof(struct qla2xxx_offld_chain) + - ha->exlogin_size; - -allocate: if (!ha->fw_dump_len || dump_size > ha->fw_dump_alloc_len) { ql_dbg(ql_dbg_init, vha, 0x00c5, @@ -4400,7 +4413,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha) inline void qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, - char *def) + const char *def) { char *st, *en; uint16_t index; @@ -4412,7 +4425,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, if (len > sizeof(zero)) len = sizeof(zero); if (memcmp(model, &zero, len) != 0) { - strncpy(ha->model_number, model, len); + memcpy(ha->model_number, model, len); st = en = ha->model_number; en += len - 1; while (en > st) { @@ -4425,21 +4438,23 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len, if (use_tbl && ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) - strncpy(ha->model_desc, + strlcpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], - sizeof(ha->model_desc) - 1); + sizeof(ha->model_desc)); } else { index = (ha->pdev->subsystem_device & 0xff); if (use_tbl && ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC && index < QLA_MODEL_NAMES) { - strcpy(ha->model_number, - qla2x00_model_name[index * 2]); - strncpy(ha->model_desc, + strlcpy(ha->model_number, + qla2x00_model_name[index * 2], + sizeof(ha->model_number)); + strlcpy(ha->model_desc, qla2x00_model_name[index * 2 + 1], - sizeof(ha->model_desc) - 1); + sizeof(ha->model_desc)); } else { - strcpy(ha->model_number, def); + strlcpy(ha->model_number, def, + sizeof(ha->model_number)); } } if (IS_FWI2_CAPABLE(ha)) @@ -4575,20 +4590,6 @@ qla2x00_nvram_config(scsi_qla_host_t *vha) rval = 1; } -#if 
defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_SGI_SN2) - /* - * The SN2 does not provide BIOS emulation which means you can't change - * potentially bogus BIOS settings. Force the use of default settings - * for link rate and frame size. Hope that the rest of the settings - * are valid. - */ - if (ia64_platform_is("sn2")) { - nv->frame_payload_size = 2048; - if (IS_QLA23XX(ha)) - nv->special_options[1] = BIT_7; - } -#endif - /* Reset Initialization control block */ memset(icb, 0, ha->init_cb_size); @@ -4881,6 +4882,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) } INIT_WORK(&fcport->del_work, qla24xx_delete_sess_fn); + INIT_WORK(&fcport->free_work, qlt_free_session_done); INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); @@ -4948,25 +4950,15 @@ qla2x00_configure_loop(scsi_qla_host_t *vha) qla2x00_get_data_rate(vha); /* Determine what we need to do */ - if (ha->current_topology == ISP_CFG_FL && - (test_bit(LOCAL_LOOP_UPDATE, &flags))) { - - set_bit(RSCN_UPDATE, &flags); - - } else if (ha->current_topology == ISP_CFG_F && + if ((ha->current_topology == ISP_CFG_FL || + ha->current_topology == ISP_CFG_F) && (test_bit(LOCAL_LOOP_UPDATE, &flags))) { set_bit(RSCN_UPDATE, &flags); clear_bit(LOCAL_LOOP_UPDATE, &flags); - } else if (ha->current_topology == ISP_CFG_N) { - clear_bit(RSCN_UPDATE, &flags); - if (qla_tgt_mode_enabled(vha)) { - /* allow the other side to start the login */ - clear_bit(LOCAL_LOOP_UPDATE, &flags); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } - } else if (ha->current_topology == ISP_CFG_NL) { + } else if (ha->current_topology == ISP_CFG_NL || + ha->current_topology == ISP_CFG_N) { clear_bit(RSCN_UPDATE, &flags); set_bit(LOCAL_LOOP_UPDATE, &flags); } else if (!vha->flags.online || @@ -5058,35 +5050,53 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) uint16_t index; uint16_t entries; - char *id_iter; + struct gid_list_info *gid; uint16_t loop_id; uint8_t domain, area, al_pa; struct qla_hw_data *ha = vha->hw; unsigned long flags; /* Inititae N2N login. 
*/ - if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { - /* borrowing */ - u32 *bp, i, sz; - - memset(ha->init_cb, 0, ha->init_cb_size); - sz = min_t(int, sizeof(struct els_plogi_payload), - ha->init_cb_size); - rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma, - (void *)ha->init_cb, sz); - if (rval == QLA_SUCCESS) { - bp = (uint32_t *)ha->init_cb; - for (i = 0; i < sz/4 ; i++, bp++) - *bp = cpu_to_be32(*bp); + if (N2N_TOPO(ha)) { + if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) { + /* borrowing */ + u32 *bp, i, sz; + + memset(ha->init_cb, 0, ha->init_cb_size); + sz = min_t(int, sizeof(struct els_plogi_payload), + ha->init_cb_size); + rval = qla24xx_get_port_login_templ(vha, + ha->init_cb_dma, (void *)ha->init_cb, sz); + if (rval == QLA_SUCCESS) { + bp = (uint32_t *)ha->init_cb; + for (i = 0; i < sz/4 ; i++, bp++) + *bp = cpu_to_be32(*bp); - memcpy(&ha->plogi_els_payld.data, (void *)ha->init_cb, - sizeof(ha->plogi_els_payld.data)); - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } else { - ql_dbg(ql_dbg_init, vha, 0x00d1, - "PLOGI ELS param read fail.\n"); + memcpy(&ha->plogi_els_payld.data, + (void *)ha->init_cb, + sizeof(ha->plogi_els_payld.data)); + } else { + ql_dbg(ql_dbg_init, vha, 0x00d1, + "PLOGI ELS param read fail.\n"); + goto skip_login; + } + } + + list_for_each_entry(fcport, &vha->vp_fcports, list) { + if (fcport->n2n_flag) { + qla24xx_fcport_handle_login(vha, fcport); + return QLA_SUCCESS; + } + } +skip_login: + spin_lock_irqsave(&vha->work_lock, flags); + vha->scan.scan_retry++; + spin_unlock_irqrestore(&vha->work_lock, flags); + + if (vha->scan.scan_retry < MAX_SCAN_RETRIES) { + set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); + set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } - return QLA_SUCCESS; } found_devs = 0; @@ -5098,7 +5108,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) rval = qla2x00_get_id_list(vha, ha->gid_list, ha->gid_list_dma, &entries); if (rval != QLA_SUCCESS) - goto cleanup_allocation; + goto err; ql_dbg(ql_dbg_disc, vha, 0x2011, "Entries in ID list (%d).\n", entries); @@ -5128,23 +5138,21 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) ql_log(ql_log_warn, vha, 0x2012, "Memory allocation failed for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; - goto cleanup_allocation; + goto err; } new_fcport->flags &= ~FCF_FABRIC_DEVICE; /* Add devices to port list. */ - id_iter = (char *)ha->gid_list; + gid = ha->gid_list; for (index = 0; index < entries; index++) { - domain = ((struct gid_list_info *)id_iter)->domain; - area = ((struct gid_list_info *)id_iter)->area; - al_pa = ((struct gid_list_info *)id_iter)->al_pa; + domain = gid->domain; + area = gid->area; + al_pa = gid->al_pa; if (IS_QLA2100(ha) || IS_QLA2200(ha)) - loop_id = (uint16_t) - ((struct gid_list_info *)id_iter)->loop_id_2100; + loop_id = gid->loop_id_2100; else - loop_id = le16_to_cpu( - ((struct gid_list_info *)id_iter)->loop_id); - id_iter += ha->gid_list_info_size; + loop_id = le16_to_cpu(gid->loop_id); + gid = (void *)gid + ha->gid_list_info_size; /* Bypass reserved domain fields. 
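/*
 * Editor's sketch (userspace model, hypothetical field layout): the loop
 * above now advances one typed gid_list_info pointer by the
 * firmware-reported entry size instead of re-casting a char* cursor for
 * every field. The stride can exceed sizeof(struct), so plain gid++ would
 * be wrong; the advance must use the reported size.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct gid_entry {                      /* stand-in for struct gid_list_info */
        uint8_t al_pa, area, domain;
        uint8_t loop_id;
};

static void walk_gid_list(const void *list, unsigned int entries,
                          size_t stride)
{
        const struct gid_entry *gid = list;

        while (entries--) {
                printf("port %02x%02x%02x loop_id %u\n",
                       gid->domain, gid->area, gid->al_pa,
                       (unsigned int)gid->loop_id);
                /* advance by the reported stride, never by sizeof(*gid) */
                gid = (const void *)((const char *)gid + stride);
        }
}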
*/ if ((domain & 0xf0) == 0xf0) @@ -5220,7 +5228,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) ql_log(ql_log_warn, vha, 0xd031, "Failed to allocate memory for fcport.\n"); rval = QLA_MEMORY_ALLOC_FAILED; - goto cleanup_allocation; + goto err; } spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); new_fcport->flags &= ~FCF_FABRIC_DEVICE; @@ -5243,7 +5251,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); + ql2xplogiabsentdevice); if (fcport->loop_id != FC_NO_LOOP_ID && (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && @@ -5263,15 +5271,14 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha) qla24xx_fcport_handle_login(vha, fcport); } -cleanup_allocation: - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); - if (rval != QLA_SUCCESS) { - ql_dbg(ql_dbg_disc, vha, 0x2098, - "Configure local loop error exit: rval=%x.\n", rval); - } + return rval; - return (rval); +err: + ql_dbg(ql_dbg_disc, vha, 0x2098, + "Configure local loop error exit: rval=%x.\n", rval); + return rval; } static void @@ -5369,7 +5376,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) "%s %8phN. rport %p is %s mode\n", __func__, fcport->port_name, rport, (fcport->port_type == FCT_TARGET) ? "tgt" : - ((fcport->port_type & FCT_NVME) ? "nvme" :"ini")); + ((fcport->port_type & FCT_NVME) ? "nvme" : "ini")); fc_remote_port_rolechg(rport, rport_ids.roles); } @@ -5398,11 +5405,14 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) ql_dbg(ql_dbg_disc, vha, 0x20ef, "%s %8phC\n", __func__, fcport->port_name); - fcport->disc_state = DSC_UPD_FCPORT; + qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); fcport->login_retry = vha->hw->login_retry_count; fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); fcport->deleted = 0; - fcport->logout_on_delete = 1; + if (vha->hw->current_topology == ISP_CFG_NL) + fcport->logout_on_delete = 0; + else + fcport->logout_on_delete = 1; fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0; switch (vha->hw->current_topology) { @@ -5416,9 +5426,9 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) qla2x00_iidma_fcport(vha, fcport); - if (fcport->fc4f_nvme) { + if (NVME_TARGET(vha->hw, fcport)) { qla_nvme_register_remote(vha, fcport); - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); qla2x00_set_fcport_state(fcport, FCS_ONLINE); return; } @@ -5463,7 +5473,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) } } - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); } void qla_register_fcport_fn(struct work_struct *work) @@ -5744,11 +5754,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) new_fcport->fc4_type = swl[swl_idx].fc4_type; new_fcport->nvme_flag = 0; - new_fcport->fc4f_nvme = 0; if (vha->flags.nvme_enabled && - swl[swl_idx].fc4f_nvme) { - new_fcport->fc4f_nvme = - swl[swl_idx].fc4f_nvme; + swl[swl_idx].fc4_type & FS_FC4TYPE_NVME) { ql_log(ql_log_info, vha, 0x2131, "FOUND: NVME port %8phC as FC Type 28h\n", new_fcport->port_name); @@ -5804,7 +5811,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) /* Bypass ports whose FCP-4 type is not FCP_SCSI */ if (ql2xgffidenable && - (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI && + (!(new_fcport->fc4_type & FS_FC4TYPE_FCP) && new_fcport->fc4_type != FC4_TYPE_UNKNOWN)) continue; @@ -5873,9 +5880,9 
@@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) break; } - if (fcport->fc4f_nvme) { + if (NVME_TARGET(vha->hw, fcport)) { if (fcport->disc_state == DSC_DELETE_PEND) { - fcport->disc_state = DSC_GNL; + qla2x00_set_fcport_disc_state(fcport, DSC_GNL); vha->fcport_count--; fcport->login_succ = 0; } @@ -5913,8 +5920,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) break; - if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 || - (fcport->flags & FCF_LOGIN_NEEDED) == 0) + if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) continue; if (fcport->scan_state == QLA_FCPORT_SCAN) { @@ -5922,7 +5928,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) qla_ini_mode_enabled(vha)) && atomic_read(&fcport->state) == FCS_ONLINE) { qla2x00_mark_device_lost(vha, fcport, - ql2xplogiabsentdevice, 0); + ql2xplogiabsentdevice); if (fcport->loop_id != FC_NO_LOOP_ID && (fcport->flags & FCF_FCP2_DEVICE) == 0 && fcport->port_type != FCT_INITIATOR && @@ -5937,7 +5943,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha) } } - if (fcport->scan_state == QLA_FCPORT_FOUND) + if (fcport->scan_state == QLA_FCPORT_FOUND && + (fcport->flags & FCF_LOGIN_NEEDED) != 0) qla24xx_fcport_handle_login(vha, fcport); } return (rval); @@ -6087,7 +6094,7 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport, ha->isp_ops->fabric_logout(vha, fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - qla2x00_mark_device_lost(vha, fcport, 1, 0); + qla2x00_mark_device_lost(vha, fcport, 1); rval = 1; break; @@ -6601,16 +6608,17 @@ qla2x00_quiesce_io(scsi_qla_host_t *vha) atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); list_for_each_entry(vp, &ha->vp_list, list) - qla2x00_mark_all_devices_lost(vp, 0); + qla2x00_mark_all_devices_lost(vp); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); } /* Wait for pending cmds to complete */ - qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST); + WARN_ON_ONCE(qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) + != QLA_SUCCESS); } void @@ -6678,14 +6686,14 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); - qla2x00_mark_all_devices_lost(vp, 0); + qla2x00_mark_all_devices_lost(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); @@ -6698,8 +6706,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha) } /* Clear all async request states across all VPs. 
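/*
 * Editor's sketch: the qla2x00_configure_local_loop() rework a little
 * earlier replaces the shared "cleanup_allocation" exit with a direct
 * success return plus a single "err" label that logs once. Generic shape
 * of that refactor; step_a()/step_b() are hypothetical work items.
 */
#include <stdio.h>

static int step_a(void) { return 0; }   /* hypothetical work items */
static int step_b(void) { return 0; }

static int configure_sketch(void)
{
        int rval;

        rval = step_a();
        if (rval)
                goto err;
        rval = step_b();
        if (rval)
                goto err;
        return 0;                       /* success: return directly, no log */
err:
        fprintf(stderr, "configure error exit: rval=%d\n", rval);
        return rval;
}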
*/ - list_for_each_entry(fcport, &vha->vp_fcports, list) + list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); + fcport->scan_state = 0; + } spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); @@ -7533,8 +7543,12 @@ qla27xx_get_active_image(struct scsi_qla_host *vha, goto check_sec_image; } - qla24xx_read_flash_data(vha, (void *)(&pri_image_status), - ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2); + if (qla24xx_read_flash_data(vha, (void *)(&pri_image_status), + ha->flt_region_img_status_pri, sizeof(pri_image_status) >> 2) != + QLA_SUCCESS) { + WARN_ON_ONCE(true); + goto check_sec_image; + } qla27xx_print_image(vha, "Primary image", &pri_image_status); if (qla27xx_check_image_status_signature(&pri_image_status)) { @@ -8288,7 +8302,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? "primary" : "secondary"); } - qla24xx_read_flash_data(vha, ha->vpd, faddr, ha->vpd_size >> 2); + ha->isp_ops->read_optrom(vha, ha->vpd, faddr << 2, ha->vpd_size); /* Get NVRAM data into cache and calculate checksum. */ faddr = ha->flt_region_nvram; @@ -8300,7 +8314,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) "Loading %s nvram image.\n", active_regions.aux.vpd_nvram == QLA27XX_PRIMARY_IMAGE ? "primary" : "secondary"); - qla24xx_read_flash_data(vha, ha->nvram, faddr, ha->nvram_size >> 2); + ha->isp_ops->read_optrom(vha, ha->nvram, faddr << 2, ha->nvram_size); dptr = (uint32_t *)nv; for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++, dptr++) @@ -8541,6 +8555,9 @@ qla81xx_nvram_config(scsi_qla_host_t *vha) /* N2N: driver will initiate Login instead of FW */ icb->firmware_options_3 |= BIT_8; + /* Determine NVMe/FCP priority for target ports */ + ha->fc4_type_priority = qla2xxx_get_fc4_priority(vha); + if (rval) { ql_log(ql_log_warn, vha, 0x0076, "NVRAM configuration failed.\n"); @@ -9030,8 +9047,6 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair) struct qla_hw_data *ha = qpair->hw; qpair->delete_in_progress = 1; - while (atomic_read(&qpair->ref_count)) - msleep(500); ret = qla25xx_delete_req_que(vha, qpair->req); if (ret != QLA_SUCCESS) diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index bf063c664352..364b3db8b2dc 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -105,6 +105,30 @@ qla2x00_clean_dsd_pool(struct qla_hw_data *ha, struct crc_context *ctx) INIT_LIST_HEAD(&ctx->dsd_list); } +static inline void +qla2x00_set_fcport_disc_state(fc_port_t *fcport, int state) +{ + int old_val; + uint8_t shiftbits, mask; + + /* This will have to change when the max no. 
of states > 16 */ + shiftbits = 4; + mask = (1 << shiftbits) - 1; + + fcport->disc_state = state; + while (1) { + old_val = atomic_read(&fcport->shadow_disc_state); + if (old_val == atomic_cmpxchg(&fcport->shadow_disc_state, + old_val, (old_val << shiftbits) | state)) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2134, + "FCPort %8phC disc_state transition: %s to %s - portid=%06x.\n", + fcport->port_name, port_dstate_str[old_val & mask], + port_dstate_str[state], fcport->d_id.b24); + return; + } + } +} + static inline int qla2x00_hba_err_chk_enabled(srb_t *sp) { @@ -152,6 +176,18 @@ qla2x00_chip_is_down(scsi_qla_host_t *vha) return (qla2x00_reset_active(vha) || !vha->hw->flags.fw_started); } +static void qla2xxx_init_sp(srb_t *sp, scsi_qla_host_t *vha, + struct qla_qpair *qpair, fc_port_t *fcport) +{ + memset(sp, 0, sizeof(*sp)); + sp->fcport = fcport; + sp->iocbs = 1; + sp->vha = vha; + sp->qpair = qpair; + sp->cmd_type = TYPE_SRB; + INIT_LIST_HEAD(&sp->elem); +} + static inline srb_t * qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair, fc_port_t *fcport, gfp_t flag) @@ -164,19 +200,9 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair, return NULL; sp = mempool_alloc(qpair->srb_mempool, flag); - if (!sp) - goto done; - - memset(sp, 0, sizeof(*sp)); - sp->fcport = fcport; - sp->iocbs = 1; - sp->vha = vha; - sp->qpair = qpair; - sp->cmd_type = TYPE_SRB; - INIT_LIST_HEAD(&sp->elem); - -done: - if (!sp) + if (sp) + qla2xxx_init_sp(sp, vha, qpair, fcport); + else QLA_QPAIR_MARK_NOT_BUSY(qpair); return sp; } @@ -305,3 +331,15 @@ qla_83xx_start_iocbs(struct qla_qpair *qpair) WRT_REG_DWORD(req->req_q_in, req->ring_index); } + +static inline int +qla2xxx_get_fc4_priority(struct scsi_qla_host *vha) +{ + uint32_t data; + + data = + ((uint8_t *)vha->hw->nvram)[NVRAM_DUAL_FCP_NVME_FLAG_OFFSET]; + + + return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME; +} diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 9312b19ed708..47bf60a9490a 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -292,6 +292,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt, } } +/* + * Find the first handle that is not in use, starting from + * req->current_outstanding_cmd + 1. The caller must hold the lock that is + * associated with @req. + */ +uint32_t qla2xxx_get_next_handle(struct req_que *req) +{ + uint32_t index, handle = req->current_outstanding_cmd; + + for (index = 1; index < req->num_outstanding_cmds; index++) { + handle++; + if (handle == req->num_outstanding_cmds) + handle = 1; + if (!req->outstanding_cmds[handle]) + return handle; + } + + return 0; +} + /** * qla2x00_start_scsi() - Send a SCSI command to the ISP * @sp: command to send to the ISP @@ -306,7 +326,6 @@ qla2x00_start_scsi(srb_t *sp) scsi_qla_host_t *vha; struct scsi_cmnd *cmd; uint32_t *clr_ptr; - uint32_t index; uint32_t handle; cmd_entry_t *cmd_pkt; uint16_t cnt; @@ -339,16 +358,8 @@ qla2x00_start_scsi(srb_t *sp) /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); - /* Check for room in outstanding command list. 
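/*
 * Editor's sketch of the shadow-state idea in
 * qla2x00_set_fcport_disc_state() above: each 4-bit state is shifted into
 * an atomic word, so the last few transitions survive for debugging.
 * Runnable C11 stdatomic model, not driver code.
 */
#include <stdatomic.h>
#include <stdio.h>

#define STATE_BITS 4
#define STATE_MASK ((1u << STATE_BITS) - 1)

static _Atomic unsigned int shadow_state;

static void push_state(unsigned int state)
{
        unsigned int old = atomic_load(&shadow_state);

        /* retry until our (old << 4) | state lands without a racing writer */
        while (!atomic_compare_exchange_weak(&shadow_state, &old,
                        (old << STATE_BITS) | (state & STATE_MASK)))
                ;
}

int main(void)
{
        push_state(1);
        push_state(3);
        push_state(2);
        /* low nibble is the current state; higher nibbles are the history */
        printf("current=%u previous=%u\n",
               atomic_load(&shadow_state) & STATE_MASK,
               (atomic_load(&shadow_state) >> STATE_BITS) & STATE_MASK);
        return 0;
}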
*/ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) + handle = qla2xxx_get_next_handle(req); + if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ @@ -610,7 +621,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, } cur_seg = scsi_sglist(cmd); - ctx = GET_CMD_CTX_SP(sp); + ctx = sp->u.scmd.ct6_ctx; while (tot_dsds) { avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ? @@ -943,8 +954,7 @@ alloc_and_fill: if (sp) { list_add_tail(&dsd_ptr->list, - &((struct crc_context *) - sp->u.scmd.ctx)->dsd_list); + &sp->u.scmd.crc_ctx->dsd_list); sp->flags |= SRB_CRC_CTX_DSD_VALID; } else { @@ -1041,8 +1051,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, if (sp) { list_add_tail(&dsd_ptr->list, - &((struct crc_context *) - sp->u.scmd.ctx)->dsd_list); + &sp->u.scmd.crc_ctx->dsd_list); sp->flags |= SRB_CRC_CTX_DSD_VALID; } else { @@ -1088,7 +1097,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, sgl = scsi_prot_sglist(cmd); vha = sp->vha; - difctx = sp->u.scmd.ctx; + difctx = sp->u.scmd.crc_ctx; direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE; ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021, "%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n", @@ -1364,6 +1373,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp, cur_dsd++; return 0; } + /** * qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command * Type 6 IOCB types. @@ -1427,7 +1437,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, bundling = 0; /* Allocate CRC context from global pool */ - crc_ctx_pkt = sp->u.scmd.ctx = + crc_ctx_pkt = sp->u.scmd.crc_ctx = dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma); if (!crc_ctx_pkt) @@ -1515,7 +1525,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, } if (!bundling) { - cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd; + cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; } else { /* * Configure Bundling if we need to fetch interlaving @@ -1525,7 +1535,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds - tot_prot_dsds); - cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd; + cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; } /* Finish the common fields of CRC pkt */ @@ -1583,7 +1593,6 @@ qla24xx_start_scsi(srb_t *sp) int nseg; unsigned long flags; uint32_t *clr_ptr; - uint32_t index; uint32_t handle; struct cmd_type_7 *cmd_pkt; uint16_t cnt; @@ -1611,16 +1620,8 @@ qla24xx_start_scsi(srb_t *sp) /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); - /* Check for room in outstanding command list. 
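/*
 * Editor's sketch: a runnable userspace model of the
 * qla2xxx_get_next_handle() helper that the call sites above and below now
 * share - scan the outstanding-command table circularly from the last used
 * slot, skip reserved slot 0, return 0 when the table is full. The caller
 * holds the queue lock and advances current_outstanding_cmd itself.
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_HANDLES 8                   /* the driver sizes this per queue */

struct req_model {
        uint32_t current_outstanding_cmd;
        void *outstanding_cmds[NUM_HANDLES];
};

static uint32_t get_next_handle(struct req_model *req)
{
        uint32_t index, handle = req->current_outstanding_cmd;

        for (index = 1; index < NUM_HANDLES; index++) {
                handle++;
                if (handle == NUM_HANDLES)
                        handle = 1;     /* slot 0 is reserved, wrap past it */
                if (!req->outstanding_cmds[handle])
                        return handle;  /* first free slot */
        }
        return 0;                       /* table full */
}

int main(void)
{
        struct req_model req = { .current_outstanding_cmd = 3 };

        printf("next free handle: %u\n", get_next_handle(&req)); /* 4 */
        return 0;
}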
*/ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) + handle = qla2xxx_get_next_handle(req); + if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ @@ -1723,7 +1724,6 @@ qla24xx_dif_start_scsi(srb_t *sp) int nseg; unsigned long flags; uint32_t *clr_ptr; - uint32_t index; uint32_t handle; uint16_t cnt; uint16_t req_cnt = 0; @@ -1764,17 +1764,8 @@ qla24xx_dif_start_scsi(srb_t *sp) /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - - if (index == req->num_outstanding_cmds) + handle = qla2xxx_get_next_handle(req); + if (handle == 0) goto queuing_error; /* Compute number of required data segments */ @@ -1919,7 +1910,6 @@ qla2xxx_start_scsi_mq(srb_t *sp) int nseg; unsigned long flags; uint32_t *clr_ptr; - uint32_t index; uint32_t handle; struct cmd_type_7 *cmd_pkt; uint16_t cnt; @@ -1950,16 +1940,8 @@ qla2xxx_start_scsi_mq(srb_t *sp) vha->marker_needed = 0; } - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) + handle = qla2xxx_get_next_handle(req); + if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ @@ -2063,7 +2045,6 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) int nseg; unsigned long flags; uint32_t *clr_ptr; - uint32_t index; uint32_t handle; uint16_t cnt; uint16_t req_cnt = 0; @@ -2118,17 +2099,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp) vha->marker_needed = 0; } - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - - if (index == req->num_outstanding_cmds) + handle = qla2xxx_get_next_handle(req); + if (handle == 0) goto queuing_error; /* Compute number of required data segments */ @@ -2275,7 +2247,7 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) struct qla_hw_data *ha = vha->hw; struct req_que *req = qpair->req; device_reg_t *reg = ISP_QUE_REG(ha, req->id); - uint32_t index, handle; + uint32_t handle; request_t *pkt; uint16_t cnt, req_cnt; @@ -2315,16 +2287,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp) goto queuing_error; if (sp) { - /* Check for room in outstanding command list. 
*/ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) { + handle = qla2xxx_get_next_handle(req); + if (handle == 0) { ql_log(ql_log_warn, vha, 0x700b, "No room on outstanding cmd array.\n"); goto queuing_error; @@ -2441,11 +2405,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx) static void qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio) { + u16 control_flags = LCF_COMMAND_LOGO; logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; - logio->control_flags = - cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); - if (!sp->fcport->keep_nport_handle) - logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT); + + if (sp->fcport->explicit_logout) { + control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT; + } else { + control_flags |= LCF_IMPL_LOGO; + + if (!sp->fcport->keep_nport_handle) + control_flags |= LCF_FREE_NPORT; + } + + logio->control_flags = cpu_to_le16(control_flags); logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); logio->port_id[0] = sp->fcport->d_id.b.al_pa; logio->port_id[1] = sp->fcport->d_id.b.area; @@ -2540,13 +2512,11 @@ void qla2x00_init_timer(srb_t *sp, unsigned long tmo) sp->free = qla2x00_sp_free; if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD) init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp); - add_timer(&sp->u.iocb_cmd.timer); + sp->start_timer = 1; } -static void -qla2x00_els_dcmd_sp_free(void *data) +static void qla2x00_els_dcmd_sp_free(srb_t *sp) { - srb_t *sp = data; struct srb_iocb *elsio = &sp->u.iocb_cmd; kfree(sp->fcport); @@ -2567,19 +2537,36 @@ qla2x00_els_dcmd_iocb_timeout(void *data) fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; struct srb_iocb *lio = &sp->u.iocb_cmd; + unsigned long flags = 0; + int res, h; ql_dbg(ql_dbg_io, vha, 0x3069, "%s Timeout, hdl=%x, portid=%02x%02x%02x\n", sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa); - complete(&lio->u.els_logo.comp); + /* Abort the exchange */ + res = qla24xx_async_abort_cmd(sp, false); + if (res) { + ql_dbg(ql_dbg_io, vha, 0x3070, + "mbx abort_command failed.\n"); + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + complete(&lio->u.els_logo.comp); + } else { + ql_dbg(ql_dbg_io, vha, 0x3071, + "mbx abort_command success.\n"); + } } -static void -qla2x00_els_dcmd_sp_done(void *ptr, int res) +static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; struct scsi_qla_host *vha = sp->vha; @@ -2657,6 +2644,10 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode, memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld, sizeof(struct els_logo_payload)); + ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:"); + ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a, + elsio->u.els_logo.els_logo_pyld, + sizeof(*elsio->u.els_logo.els_logo_pyld)); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -2696,28 +2687,32 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa; els_iocb->port_id[1] = sp->fcport->d_id.b.area; 
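/*
 * Editor's sketch of the LOGO control-flag selection that
 * qla24xx_logout_iocb() above now performs: an explicit logout always
 * releases the N_Port handle, an implicit one releases it only when the
 * handle should not be kept. Flag values are stand-ins, not the firmware's.
 */
#include <stdint.h>

#define LCF_COMMAND_LOGO 0x01u
#define LCF_EXPL_LOGO    0x02u
#define LCF_IMPL_LOGO    0x04u
#define LCF_FREE_NPORT   0x08u

static uint16_t logo_control_flags(int explicit_logout, int keep_nport_handle)
{
        uint16_t flags = LCF_COMMAND_LOGO;

        if (explicit_logout) {
                /* explicit LOGO always frees the N_Port handle */
                flags |= LCF_EXPL_LOGO | LCF_FREE_NPORT;
        } else {
                flags |= LCF_IMPL_LOGO;
                if (!keep_nport_handle)
                        flags |= LCF_FREE_NPORT;
        }
        return flags;                   /* caller stores it as cpu_to_le16() */
}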
els_iocb->port_id[2] = sp->fcport->d_id.b.domain; - els_iocb->s_id[0] = vha->d_id.b.al_pa; - els_iocb->s_id[1] = vha->d_id.b.area; - els_iocb->s_id[2] = vha->d_id.b.domain; - els_iocb->control_flags = 0; + /* For SID the byte order is different than DID */ + els_iocb->s_id[1] = vha->d_id.b.al_pa; + els_iocb->s_id[2] = vha->d_id.b.area; + els_iocb->s_id[0] = vha->d_id.b.domain; if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) { + els_iocb->control_flags = 0; els_iocb->tx_byte_count = els_iocb->tx_len = - sizeof(struct els_plogi_payload); + cpu_to_le32(sizeof(struct els_plogi_payload)); put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma, &els_iocb->tx_address); els_iocb->rx_dsd_count = 1; els_iocb->rx_byte_count = els_iocb->rx_len = - sizeof(struct els_plogi_payload); + cpu_to_le32(sizeof(struct els_plogi_payload)); put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma, &els_iocb->rx_address); ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073, "PLOGI ELS IOCB:\n"); ql_dump_buffer(ql_log_info, vha, 0x0109, - (uint8_t *)els_iocb, 0x70); + (uint8_t *)els_iocb, + sizeof(*els_iocb)); } else { - els_iocb->tx_byte_count = sizeof(struct els_logo_payload); + els_iocb->control_flags = 1 << 13; + els_iocb->tx_byte_count = + cpu_to_le32(sizeof(struct els_logo_payload)); put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma, &els_iocb->tx_address); els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload)); @@ -2725,6 +2720,11 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb) els_iocb->rx_byte_count = 0; els_iocb->rx_address = 0; els_iocb->rx_len = 0; + ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076, + "LOGO ELS IOCB:"); + ql_dump_buffer(ql_log_info, vha, 0x010b, + els_iocb, + sizeof(*els_iocb)); } sp->vha->qla_stats.control_requests++; @@ -2736,34 +2736,57 @@ qla2x00_els_dcmd2_iocb_timeout(void *data) srb_t *sp = data; fc_port_t *fcport = sp->fcport; struct scsi_qla_host *vha = sp->vha; - struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; - int res; + int res, h; ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069, "%s hdl=%x ELS Timeout, %8phC portid=%06x\n", sp->name, sp->handle, fcport->port_name, fcport->d_id.b24); /* Abort the exchange */ - spin_lock_irqsave(&ha->hardware_lock, flags); - res = ha->isp_ops->abort_command(sp); + res = qla24xx_async_abort_cmd(sp, false); ql_dbg(ql_dbg_io, vha, 0x3070, "mbx abort_command %s\n", (res == QLA_SUCCESS) ? 
"successful" : "failed"); - spin_unlock_irqrestore(&ha->hardware_lock, flags); + if (res) { + spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); + for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) { + if (sp->qpair->req->outstanding_cmds[h] == sp) { + sp->qpair->req->outstanding_cmds[h] = NULL; + break; + } + } + spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags); + sp->done(sp, QLA_FUNCTION_TIMEOUT); + } +} - sp->done(sp, QLA_FUNCTION_TIMEOUT); +void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi) +{ + if (els_plogi->els_plogi_pyld) + dma_free_coherent(&vha->hw->pdev->dev, + els_plogi->tx_size, + els_plogi->els_plogi_pyld, + els_plogi->els_plogi_pyld_dma); + + if (els_plogi->els_resp_pyld) + dma_free_coherent(&vha->hw->pdev->dev, + els_plogi->rx_size, + els_plogi->els_resp_pyld, + els_plogi->els_resp_pyld_dma); } -static void -qla2x00_els_dcmd2_sp_done(void *ptr, int res) +static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; fc_port_t *fcport = sp->fcport; struct srb_iocb *lio = &sp->u.iocb_cmd; struct scsi_qla_host *vha = sp->vha; struct event_arg ea; struct qla_work_evt *e; + struct fc_port *conflict_fcport; + port_id_t cid; /* conflict Nport id */ + u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status; + u16 lid; ql_dbg(ql_dbg_disc, vha, 0x3072, "%s ELS done rc %d hdl=%x, portid=%06x %8phC\n", @@ -2775,31 +2798,109 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res) if (sp->flags & SRB_WAKEUP_ON_COMP) complete(&lio->u.els_plogi.comp); else { - if (res) { - set_bit(RELOGIN_NEEDED, &vha->dpc_flags); - } else { + switch (fw_status[0]) { + case CS_DATA_UNDERRUN: + case CS_COMPLETE: memset(&ea, 0, sizeof(ea)); ea.fcport = fcport; ea.rc = res; - ea.event = FCME_ELS_PLOGI_DONE; - qla2x00_fcport_event_handler(vha, &ea); + qla_handle_els_plogi_done(vha, &ea); + break; + + case CS_IOCB_ERROR: + switch (fw_status[1]) { + case LSC_SCODE_PORTID_USED: + lid = fw_status[2] & 0xffff; + qlt_find_sess_invalidate_other(vha, + wwn_to_u64(fcport->port_name), + fcport->d_id, lid, &conflict_fcport); + if (conflict_fcport) { + /* + * Another fcport shares the same + * loop_id & nport id; conflict + * fcport needs to finish cleanup + * before this fcport can proceed + * to login. + */ + conflict_fcport->conflict = fcport; + fcport->login_pause = 1; + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n", + __func__, __LINE__, + fcport->port_name, + fcport->d_id.b24, lid); + } else { + ql_dbg(ql_dbg_disc, vha, 0x20ed, + "%s %d %8phC pid %06x inuse with lid %#x sched del\n", + __func__, __LINE__, + fcport->port_name, + fcport->d_id.b24, lid); + qla2x00_clear_loop_id(fcport); + set_bit(lid, vha->hw->loop_id_map); + fcport->loop_id = lid; + fcport->keep_nport_handle = 0; + qlt_schedule_sess_for_deletion(fcport); + } + break; + + case LSC_SCODE_NPORT_USED: + cid.b.domain = (fw_status[2] >> 16) & 0xff; + cid.b.area = (fw_status[2] >> 8) & 0xff; + cid.b.al_pa = fw_status[2] & 0xff; + cid.b.rsvd_1 = 0; + + ql_dbg(ql_dbg_disc, vha, 0x20ec, + "%s %d %8phC lid %#x in use with pid %06x post gnl\n", + __func__, __LINE__, fcport->port_name, + fcport->loop_id, cid.b24); + set_bit(fcport->loop_id, + vha->hw->loop_id_map); + fcport->loop_id = FC_NO_LOOP_ID; + qla24xx_post_gnl_work(vha, fcport); + break; + + case LSC_SCODE_NOXCB: + vha->hw->exch_starvation++; + if (vha->hw->exch_starvation > 5) { + ql_log(ql_log_warn, vha, 0xd046, + "Exchange starvation. 
Resetting RISC\n"); + vha->hw->exch_starvation = 0; + set_bit(ISP_ABORT_NEEDED, + &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } + /* fall through */ + default: + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n", + __func__, sp->fcport->port_name, + fw_status[0], fw_status[1], fw_status[2]); + + fcport->flags &= ~FCF_ASYNC_SENT; + qla2x00_set_fcport_disc_state(fcport, + DSC_LOGIN_FAILED); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; + } + break; + + default: + ql_dbg(ql_dbg_disc, vha, 0x20eb, + "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n", + __func__, sp->fcport->port_name, + fw_status[0], fw_status[1], fw_status[2]); + + sp->fcport->flags &= ~FCF_ASYNC_SENT; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED); + set_bit(RELOGIN_NEEDED, &vha->dpc_flags); + break; } e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP); if (!e) { struct srb_iocb *elsio = &sp->u.iocb_cmd; - if (elsio->u.els_plogi.els_plogi_pyld) - dma_free_coherent(&sp->vha->hw->pdev->dev, - elsio->u.els_plogi.tx_size, - elsio->u.els_plogi.els_plogi_pyld, - elsio->u.els_plogi.els_plogi_pyld_dma); - - if (elsio->u.els_plogi.els_resp_pyld) - dma_free_coherent(&sp->vha->hw->pdev->dev, - elsio->u.els_plogi.rx_size, - elsio->u.els_plogi.els_resp_pyld, - elsio->u.els_plogi.els_resp_pyld_dma); + qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); sp->free(sp); return; } @@ -2823,14 +2924,16 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, if (!sp) { ql_log(ql_log_info, vha, 0x70e6, "SRB allocation failed\n"); + fcport->flags &= ~FCF_ASYNC_ACTIVE; return -ENOMEM; } + fcport->flags |= FCF_ASYNC_SENT; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND); elsio = &sp->u.iocb_cmd; ql_dbg(ql_dbg_io, vha, 0x3073, "Enter: PLOGI portid=%06x\n", fcport->d_id.b24); - fcport->flags |= FCF_ASYNC_SENT; sp->type = SRB_ELS_DCMD; sp->name = "ELS_DCMD"; sp->fcport = fcport; @@ -2876,7 +2979,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n"); ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109, - (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70); + (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, + sizeof(*elsio->u.els_plogi.els_plogi_pyld)); rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) { @@ -2898,19 +3002,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode, } out: - fcport->flags &= ~(FCF_ASYNC_SENT); - if (elsio->u.els_plogi.els_plogi_pyld) - dma_free_coherent(&sp->vha->hw->pdev->dev, - elsio->u.els_plogi.tx_size, - elsio->u.els_plogi.els_plogi_pyld, - elsio->u.els_plogi.els_plogi_pyld_dma); - - if (elsio->u.els_plogi.els_resp_pyld) - dma_free_coherent(&sp->vha->hw->pdev->dev, - elsio->u.els_plogi.rx_size, - elsio->u.els_plogi.els_resp_pyld, - elsio->u.els_plogi.els_resp_pyld_dma); - + fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE); + qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi); sp->free(sp); done: return rval; @@ -3115,7 +3208,6 @@ qla82xx_start_scsi(srb_t *sp) unsigned long flags; struct scsi_cmnd *cmd; uint32_t *clr_ptr; - uint32_t index; uint32_t handle; uint16_t cnt; uint16_t req_cnt; @@ -3155,16 +3247,8 @@ qla82xx_start_scsi(srb_t *sp) /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); - /* Check for room in outstanding command list. 
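/*
 * Editor's sketch: in the LSC_SCODE_NPORT_USED branch of the completion
 * handler above, the conflicting 24-bit N_Port ID arrives packed in one
 * firmware status dword. Runnable userspace model of that unpacking,
 * using the same shifts as the patch.
 */
#include <stdint.h>
#include <stdio.h>

struct port_id_sketch {
        uint8_t domain, area, al_pa;
};

static struct port_id_sketch unpack_port_id(uint32_t fw_status2)
{
        struct port_id_sketch cid = {
                .domain = (fw_status2 >> 16) & 0xff,
                .area   = (fw_status2 >> 8) & 0xff,
                .al_pa  = fw_status2 & 0xff,
        };
        return cid;
}

int main(void)
{
        struct port_id_sketch cid = unpack_port_id(0x010203);

        printf("conflict nport id %02x%02x%02x\n",
               cid.domain, cid.area, cid.al_pa);        /* prints 010203 */
        return 0;
}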
*/ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) + handle = qla2xxx_get_next_handle(req); + if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ @@ -3235,7 +3319,7 @@ sufficient_dsds: goto queuing_error; } - ctx = sp->u.scmd.ctx = + ctx = sp->u.scmd.ct6_ctx = mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); if (!ctx) { ql_log(ql_log_fatal, vha, 0x3010, @@ -3431,9 +3515,9 @@ queuing_error: if (tot_dsds) scsi_dma_unmap(cmd); - if (sp->u.scmd.ctx) { - mempool_free(sp->u.scmd.ctx, ha->ctx_mempool); - sp->u.scmd.ctx = NULL; + if (sp->u.scmd.crc_ctx) { + mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool); + sp->u.scmd.crc_ctx = NULL; } spin_unlock_irqrestore(&ha->hardware_lock, flags); @@ -3668,6 +3752,9 @@ qla2x00_start_sp(srb_t *sp) break; } + if (sp->start_timer) + add_timer(&sp->u.iocb_cmd.timer); + wmb(); qla2x00_start_iocbs(vha, qp->req); done: @@ -3769,7 +3856,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) struct qla_hw_data *ha = vha->hw; unsigned long flags; uint32_t handle; - uint32_t index; uint16_t req_cnt; uint16_t cnt; uint32_t *clr_ptr; @@ -3794,17 +3880,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds) /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - - if (index == req->num_outstanding_cmds) { + handle = qla2xxx_get_next_handle(req); + if (handle == 0) { rval = EXT_STATUS_BUSY; goto queuing_error; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 78aec50abe0f..e40705d38cea 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -776,7 +776,6 @@ skip_rio: case MBA_LOOP_INIT_ERR: ql_log(ql_log_warn, vha, 0x5090, "LOOP INIT ERROR (%x).\n", mb[1]); - ha->isp_ops->fw_dump(vha, 1); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; @@ -789,7 +788,7 @@ skip_rio: if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -862,7 +861,7 @@ skip_rio: } vha->device_flags |= DFLG_NO_CABLE; - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -882,7 +881,7 @@ skip_rio: if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -925,7 +924,7 @@ skip_rio: atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); if (!N2N_TOPO(ha)) - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -954,7 +953,7 @@ skip_rio: if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { @@ -1023,7 +1022,6 @@ skip_rio: "Marking port 
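/*
 * Editor's sketch, with hypothetical stubs: the sp->start_timer change
 * above means qla2x00_init_timer() only records that a timer is wanted,
 * and qla2x00_start_sp() arms it right before the doorbell write. Arming
 * after the IOCB is definitely queued ensures the timeout handler can
 * never fire for a request that was never submitted.
 */
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/types.h>

struct sp_sketch {
        struct timer_list timer;
        bool start_timer;
};

static int build_iocb(struct sp_sketch *sp) { return 0; }       /* stub */
static void ring_doorbell(void) { }                             /* stub */

static int start_sp_sketch(struct sp_sketch *sp)
{
        if (build_iocb(sp))
                return -EIO;    /* timer never armed, so no stale timeout */

        if (sp->start_timer)
                add_timer(&sp->timer);  /* arm only once queueing is certain */

        ring_doorbell();                /* hand the IOCB to the hardware */
        return 0;
}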
lost loopid=%04x portid=%06x.\n", fcport->loop_id, fcport->d_id.b24); if (qla_ini_mode_enabled(vha)) { - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); fcport->logout_on_delete = 0; qlt_schedule_sess_for_deletion(fcport); } @@ -1035,14 +1033,14 @@ global_port_update: atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); vha->device_flags |= DFLG_NO_CABLE; - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } if (vha->vp_idx) { atomic_set(&vha->vp_state, VP_FAILED); fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } vha->flags.management_server_logged_in = 0; @@ -1062,8 +1060,6 @@ global_port_update: ql_dbg(ql_dbg_async, vha, 0x5011, "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", mb[1], mb[2], mb[3]); - - qlt_async_event(mb[0], vha, mb); break; } @@ -1080,8 +1076,6 @@ global_port_update: set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); set_bit(VP_CONFIG_OK, &vha->vp_flags); - - qlt_async_event(mb[0], vha, mb); break; case MBA_RSCN_UPDATE: /* State Change Registration */ @@ -1119,10 +1113,9 @@ global_port_update: struct event_arg ea; memset(&ea, 0, sizeof(ea)); - ea.event = FCME_RSCN; ea.id.b24 = rscn_entry; ea.id.b.rsvd_1 = rscn_entry >> 24; - qla2x00_fcport_event_handler(vha, &ea); + qla2x00_handle_rscn(vha, &ea); qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry); } break; @@ -1229,20 +1222,63 @@ global_port_update: break; case MBA_IDC_AEN: - mb[4] = RD_REG_WORD(®24->mailbox4); - mb[5] = RD_REG_WORD(®24->mailbox5); - mb[6] = RD_REG_WORD(®24->mailbox6); - mb[7] = RD_REG_WORD(®24->mailbox7); - qla83xx_handle_8200_aen(vha, mb); + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + ha->flags.fw_init_done = 0; + ql_log(ql_log_warn, vha, 0xffff, + "MPI Heartbeat stop. Chip reset needed. MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n", + mb[0], mb[1], mb[2], mb[3]); + + if ((mb[1] & BIT_8) || + (mb[2] & BIT_8)) { + ql_log(ql_log_warn, vha, 0xd013, + "MPI Heartbeat stop. FW dump needed\n"); + ha->fw_dump_mpi = 1; + ha->isp_ops->fw_dump(vha, 1); + } + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + qla2xxx_wake_dpc(vha); + } else if (IS_QLA83XX(ha)) { + mb[4] = RD_REG_WORD(®24->mailbox4); + mb[5] = RD_REG_WORD(®24->mailbox5); + mb[6] = RD_REG_WORD(®24->mailbox6); + mb[7] = RD_REG_WORD(®24->mailbox7); + qla83xx_handle_8200_aen(vha, mb); + } else { + ql_dbg(ql_dbg_async, vha, 0x5052, + "skip Heartbeat processing mb0-3=[0x%04x] [0x%04x] [0x%04x] [0x%04x]\n", + mb[0], mb[1], mb[2], mb[3]); + } break; case MBA_DPORT_DIAGNOSTICS: ql_dbg(ql_dbg_async, vha, 0x5052, - "D-Port Diagnostics: %04x result=%s\n", - mb[0], - mb[1] == 0 ? "start" : - mb[1] == 1 ? "done (pass)" : - mb[1] == 2 ? 
"done (error)" : "other"); + "D-Port Diagnostics: %04x %04x %04x %04x\n", + mb[0], mb[1], mb[2], mb[3]); + if (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { + static char *results[] = { + "start", "done(pass)", "done(error)", "undefined" }; + static char *types[] = { + "none", "dynamic", "static", "other" }; + uint result = mb[1] >> 0 & 0x3; + uint type = mb[1] >> 6 & 0x3; + uint sw = mb[1] >> 15 & 0x1; + ql_dbg(ql_dbg_async, vha, 0x5052, + "D-Port Diagnostics: result=%s type=%s [sw=%u]\n", + results[result], types[type], sw); + if (result == 2) { + static char *reasons[] = { + "reserved", "unexpected reject", + "unexpected phase", "retry exceeded", + "timed out", "not supported", + "user stopped" }; + uint reason = mb[2] >> 0 & 0xf; + uint phase = mb[2] >> 12 & 0xf; + ql_dbg(ql_dbg_async, vha, 0x5052, + "D-Port Diagnostics: reason=%s phase=%u \n", + reason < 7 ? reasons[reason] : "other", + phase >> 1); + } + } break; case MBA_TEMPERATURE_ALERT: @@ -1514,7 +1550,7 @@ qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req, if (comp_status == CS_DATA_UNDERRUN) { res = DID_OK << 16; bsg_reply->reply_payload_rcv_len = - le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len); + le16_to_cpu(pkt->rsp_info_len); ql_log(ql_log_warn, vha, 0x5048, "CT pass-through-%s error comp_status=0x%x total_byte=0x%x.\n", @@ -1903,6 +1939,18 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, inbuf = (uint32_t *)&sts->nvme_ersp_data; outbuf = (uint32_t *)fd->rspaddr; iocb->u.nvme.rsp_pyld_len = le16_to_cpu(sts->nvme_rsp_pyld_len); + if (unlikely(iocb->u.nvme.rsp_pyld_len > + sizeof(struct nvme_fc_ersp_iu))) { + if (ql_mask_match(ql_dbg_io)) { + WARN_ONCE(1, "Unexpected response payload length %u.\n", + iocb->u.nvme.rsp_pyld_len); + ql_log(ql_log_warn, fcport->vha, 0x5100, + "Unexpected response payload length %u.\n", + iocb->u.nvme.rsp_pyld_len); + } + iocb->u.nvme.rsp_pyld_len = + sizeof(struct nvme_fc_ersp_iu); + } iter = iocb->u.nvme.rsp_pyld_len >> 2; for (; iter; iter--) *outbuf++ = swab32(*inbuf++); @@ -2137,12 +2185,12 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) * swab32 of the "data" field in the beginning of qla2x00_status_entry() * would make guard field appear at offset 2 */ - a_guard = le16_to_cpu(*(uint16_t *)(ap + 2)); - a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0)); - a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4)); - e_guard = le16_to_cpu(*(uint16_t *)(ep + 2)); - e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0)); - e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4)); + a_guard = get_unaligned_le16(ap + 2); + a_app_tag = get_unaligned_le16(ap + 0); + a_ref_tag = get_unaligned_le32(ap + 4); + e_guard = get_unaligned_le16(ep + 2); + e_app_tag = get_unaligned_le16(ep + 0); + e_ref_tag = get_unaligned_le32(ep + 4); ql_dbg(ql_dbg_io, vha, 0x3023, "iocb(s) %p Returned STATUS.\n", sts24); @@ -2257,11 +2305,8 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt, struct bsg_job *bsg_job = NULL; struct fc_bsg_request *bsg_request; struct fc_bsg_reply *bsg_reply; - sts_entry_t *sts; - struct sts_entry_24xx *sts24; - - sts = (sts_entry_t *) pkt; - sts24 = (struct sts_entry_24xx *) pkt; + sts_entry_t *sts = pkt; + struct sts_entry_24xx *sts24 = pkt; /* Validate handle. 
*/ if (index >= req->num_outstanding_cmds) { @@ -2407,8 +2452,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) srb_t *sp; fc_port_t *fcport; struct scsi_cmnd *cp; - sts_entry_t *sts; - struct sts_entry_24xx *sts24; + sts_entry_t *sts = pkt; + struct sts_entry_24xx *sts24 = pkt; uint16_t comp_status; uint16_t scsi_status; uint16_t ox_id; @@ -2426,8 +2471,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) uint16_t state_flags = 0; uint16_t retry_delay = 0; - sts = (sts_entry_t *) pkt; - sts24 = (struct sts_entry_24xx *) pkt; if (IS_FWI2_CAPABLE(ha)) { comp_status = le16_to_cpu(sts24->comp_status); scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK; @@ -2473,6 +2516,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt) return; } + if (sp->abort) + sp->aborted = 1; + else + sp->completed = 1; + if (sp->cmd_type != TYPE_SRB) { req->outstanding_cmds[handle] = NULL; ql_dbg(ql_dbg_io, vha, 0x3015, @@ -2727,10 +2775,9 @@ check_scsi_status: "Port to be marked lost on fcport=%02x%02x%02x, current " "port state= %s comp_status %x.\n", fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa, - port_state_str[atomic_read(&fcport->state)], + port_state_str[FCS_ONLINE], comp_status); - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); qlt_schedule_sess_for_deletion(fcport); } @@ -3471,10 +3518,8 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) ha->msix_count, ret); goto msix_out; } else if (ret < ha->msix_count) { - ql_log(ql_log_warn, vha, 0x00c6, - "MSI-X: Failed to enable support " - "with %d vectors, using %d vectors.\n", - ha->msix_count, ret); + ql_log(ql_log_info, vha, 0x00c6, + "MSI-X: Using %d vectors\n", ret); ha->msix_count = ret; /* Recalculate queue values */ if (ha->mqiobase && (ql2xmqsupport || ql2xnvmeenable)) { @@ -3633,7 +3678,7 @@ qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp) skip_msix: ql_log(ql_log_info, vha, 0x0037, - "Falling back-to MSI mode -%d.\n", ret); + "Falling back-to MSI mode -- ret=%d.\n", ret); if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) && !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha) && @@ -3641,13 +3686,13 @@ skip_msix: goto skip_msi; ret = pci_alloc_irq_vectors(ha->pdev, 1, 1, PCI_IRQ_MSI); - if (!ret) { + if (ret > 0) { ql_dbg(ql_dbg_init, vha, 0x0038, "MSI: Enabled.\n"); ha->flags.msi_enabled = 1; } else ql_log(ql_log_warn, vha, 0x0039, - "Falling back-to INTa mode -- %d.\n", ret); + "Falling back-to INTa mode -- ret=%d.\n", ret); skip_msi: /* Skip INTx on ISP82xx. 
*/ diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 133f5f6270ff..9e09964f5c0e 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -253,21 +253,9 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) if ((!abort_active && io_lock_on) || IS_NOPOLLING_TYPE(ha)) { set_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags); - if (IS_P3P_TYPE(ha)) { - if (RD_REG_DWORD(®->isp82.hint) & - HINT_MBX_INT_PENDING) { - ha->flags.mbox_busy = 0; - spin_unlock_irqrestore(&ha->hardware_lock, - flags); - - atomic_dec(&ha->num_pend_mbx_stage2); - ql_dbg(ql_dbg_mbx, vha, 0x1010, - "Pending mailbox timeout, exiting.\n"); - rval = QLA_FUNCTION_TIMEOUT; - goto premature_exit; - } + if (IS_P3P_TYPE(ha)) WRT_REG_DWORD(®->isp82.hint, HINT_MBX_INT_PENDING); - } else if (IS_FWI2_CAPABLE(ha)) + else if (IS_FWI2_CAPABLE(ha)) WRT_REG_DWORD(®->isp24.hccr, HCCRX_SET_HOST_INT); else WRT_REG_WORD(®->isp.hccr, HCCR_SET_HOST_INT); @@ -394,8 +382,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp) goto premature_exit; } - if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) + if (ha->mailbox_out[0] != MBS_COMMAND_COMPLETE) { + ql_dbg(ql_dbg_mbx, vha, 0x11ff, + "mb_out[0] = %#x <> %#x\n", ha->mailbox_out[0], + MBS_COMMAND_COMPLETE); rval = QLA_FUNCTION_FAILED; + } /* Load return mailbox registers. */ iptr2 = mcp->mb; @@ -710,6 +702,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) mcp->mb[2] = LSW(risc_addr); mcp->mb[3] = 0; mcp->mb[4] = 0; + mcp->mb[11] = 0; ha->flags.using_lr_setting = 0; if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) { @@ -754,7 +747,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr) if (ha->flags.exchoffld_enabled) mcp->mb[4] |= ENABLE_EXCHANGE_OFFLD; - mcp->out_mb |= MBX_4|MBX_3|MBX_2|MBX_1; + mcp->out_mb |= MBX_4 | MBX_3 | MBX_2 | MBX_1 | MBX_11; mcp->in_mb |= MBX_3 | MBX_2 | MBX_1; } else { mcp->mb[1] = LSW(risc_addr); @@ -1939,7 +1932,7 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt) pd24 = (struct port_database_24xx *) pd; /* Check for logged in state. 
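/*
 * Editor's sketch: qla2x00_execute_fw() above now zeroes mb[11] and adds
 * MBX_11 to out_mb. The invariant behind both mailbox fixes in this patch
 * (this one and the mb[10] assignment in qla2x00_dump_mctp_data): any slot
 * named in the out_mb bitmap is written to hardware, so it must be fully
 * initialized even when the feature that uses it is off. Userspace model:
 */
#include <stdint.h>
#include <string.h>

#define MBX_11 (1u << 11)

struct mbx_sketch {
        uint16_t mb[32];
        uint32_t out_mb;        /* bitmap of mb[] slots sent to hardware */
};

static void prepare_fw_exec(struct mbx_sketch *mcp, int use_lr_setting)
{
        memset(mcp, 0, sizeof(*mcp));   /* mb[11] defined even if unused */
        if (use_lr_setting)
                mcp->mb[11] |= 0x1;     /* stand-in for the LR distance bits */
        mcp->out_mb |= MBX_11;          /* slot is transmitted either way */
}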
*/ - if (fcport->fc4f_nvme) { + if (NVME_TARGET(ha, fcport)) { current_login_state = pd24->current_login_state >> 4; last_login_state = pd24->last_login_state >> 4; } else { @@ -2257,7 +2250,7 @@ qla2x00_lip_reset(scsi_qla_host_t *vha) mbx_cmd_t mc; mbx_cmd_t *mcp = &mc; - ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a, + ql_dbg(ql_dbg_disc, vha, 0x105a, "Entered %s.\n", __func__); if (IS_CNA_CAPABLE(vha->hw)) { @@ -3891,14 +3884,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, case TOPO_N2N: ha->current_topology = ISP_CFG_N; spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); + list_for_each_entry(fcport, &vha->vp_fcports, list) { + fcport->scan_state = QLA_FCPORT_SCAN; + fcport->n2n_flag = 0; + } + fcport = qla2x00_find_fcport_by_wwpn(vha, rptid_entry->u.f1.port_name, 1); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); if (fcport) { fcport->plogi_nack_done_deadline = jiffies + HZ; - fcport->dm_login_expire = jiffies + 3*HZ; + fcport->dm_login_expire = jiffies + 2*HZ; fcport->scan_state = QLA_FCPORT_FOUND; + fcport->n2n_flag = 1; + fcport->keep_nport_handle = 1; + fcport->fc4_type = FS_FC4TYPE_FCP; + if (vha->flags.nvme_enabled) + fcport->fc4_type |= FS_FC4TYPE_NVME; + switch (fcport->disc_state) { case DSC_DELETED: set_bit(RELOGIN_NEEDED, @@ -3917,6 +3921,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, vha->d_id.b24 = 0; vha->d_id.b.al_pa = 1; ha->flags.n2n_bigger = 1; + ha->flags.n2n_ae = 0; id.b.al_pa = 2; ql_dbg(ql_dbg_async, vha, 0x5075, @@ -3927,18 +3932,18 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, "Format 1: Remote login - Waiting for WWPN %8phC.\n", rptid_entry->u.f1.port_name); ha->flags.n2n_bigger = 0; + ha->flags.n2n_ae = 1; } qla24xx_post_newsess_work(vha, &id, rptid_entry->u.f1.port_name, rptid_entry->u.f1.node_name, NULL, - FC4_TYPE_UNKNOWN); + FS_FCP_IS_N2N); } /* if our portname is higher then initiate N2N login */ set_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags); - ha->flags.n2n_ae = 1; return; break; case TOPO_FL: @@ -4031,6 +4036,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, list_for_each_entry(fcport, &vha->vp_fcports, list) { fcport->scan_state = QLA_FCPORT_SCAN; + fcport->n2n_flag = 0; } fcport = qla2x00_find_fcport_by_wwpn(vha, @@ -4040,6 +4046,14 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha, fcport->login_retry = vha->hw->login_retry_count; fcport->plogi_nack_done_deadline = jiffies + HZ; fcport->scan_state = QLA_FCPORT_FOUND; + fcport->keep_nport_handle = 1; + fcport->n2n_flag = 1; + fcport->d_id.b.domain = + rptid_entry->u.f2.remote_nport_id[2]; + fcport->d_id.b.area = + rptid_entry->u.f2.remote_nport_id[1]; + fcport->d_id.b.al_pa = + rptid_entry->u.f2.remote_nport_id[0]; } } } @@ -6138,9 +6152,8 @@ qla2x00_dump_mctp_data(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr, mcp->mb[7] = LSW(MSD(req_dma)); mcp->mb[8] = MSW(addr); /* Setting RAM ID to valid */ - mcp->mb[10] |= BIT_7; /* For MCTP RAM ID is 0x40 */ - mcp->mb[10] |= 0x40; + mcp->mb[10] = BIT_7 | 0x40; mcp->out_mb |= MBX_10|MBX_8|MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1| MBX_0; @@ -6213,10 +6226,8 @@ qla26xx_dport_diagnostics(scsi_qla_host_t *vha, return rval; } -static void qla2x00_async_mb_sp_done(void *s, int res) +static void qla2x00_async_mb_sp_done(srb_t *sp, int res) { - struct srb *sp = s; - sp->u.iocb_cmd.u.mbx.rc = res; complete(&sp->u.iocb_cmd.u.mbx.comp); @@ -6277,17 +6288,13 @@ int qla24xx_send_mb_cmd(struct scsi_qla_host *vha, mbx_cmd_t *mcp) case QLA_SUCCESS: ql_dbg(ql_dbg_mbx, vha, 0x119d, "%s: %s done.\n", __func__, sp->name); - 
sp->free(sp); break; default: ql_dbg(ql_dbg_mbx, vha, 0x119e, "%s: %s Failed. %x.\n", __func__, sp->name, rval); - sp->free(sp); break; } - return rval; - done_free_sp: sp->free(sp); done: @@ -6352,7 +6359,7 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, uint64_t zero = 0; u8 current_login_state, last_login_state; - if (fcport->fc4f_nvme) { + if (NVME_TARGET(vha->hw, fcport)) { current_login_state = pd->current_login_state >> 4; last_login_state = pd->last_login_state >> 4; } else { @@ -6387,8 +6394,8 @@ int __qla24xx_parse_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, fcport->d_id.b.al_pa = pd->port_id[2]; fcport->d_id.b.rsvd_1 = 0; - if (fcport->fc4f_nvme) { - fcport->port_type = 0; + if (NVME_TARGET(vha->hw, fcport)) { + fcport->port_type = FCT_NVME; if ((pd->prli_svc_param_word_3[0] & BIT_5) == 0) fcport->port_type |= FCT_NVME_INITIATOR; if ((pd->prli_svc_param_word_3[0] & BIT_4) == 0) diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index b2977e49356b..8ae639d089d1 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -66,6 +66,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) uint16_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; + u8 i; mutex_lock(&ha->vport_lock); /* @@ -75,8 +76,11 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) * ensures no active vp_list traversal while the vport is removed * from the queue) */ - wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count), - 10*HZ); + for (i = 0; i < 10; i++) { + if (wait_event_timeout(vha->vref_waitq, + !atomic_read(&vha->vref_count), HZ) > 0) + break; + } spin_lock_irqsave(&ha->vport_slock, flags); if (atomic_read(&vha->vref_count)) { @@ -143,7 +147,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) "Marking port dead, loop_id=0x%04x : %x.\n", fcport->loop_id, fcport->vha->vp_idx); - qla2x00_mark_device_lost(vha, fcport, 0, 0); + qla2x00_mark_device_lost(vha, fcport, 0); qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); } } @@ -163,7 +167,7 @@ qla24xx_disable_vp(scsi_qla_host_t *vha) list_for_each_entry(fcport, &vha->vp_fcports, list) fcport->logout_on_delete = 0; - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); /* Remove port id from vp target map */ spin_lock_irqsave(&vha->hw->hardware_lock, flags); @@ -262,6 +266,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vha, &ha->vp_list, list) { if (vha->vp_idx) { + if (test_bit(VPORT_DELETE, &vha->dpc_flags)) + continue; + atomic_inc(&vha->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); @@ -300,28 +307,35 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) int qla2x00_vp_abort_isp(scsi_qla_host_t *vha) { + fc_port_t *fcport; + + /* + * To exclusively reset vport, we need to log it out first. + * Note: This control_vp can fail if ISP reset is already + * issued, this is expected, as the vp would be already + * logged out due to ISP reset. + */ + if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { + qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); + list_for_each_entry(fcport, &vha->vp_fcports, list) + fcport->logout_on_delete = 0; + } + /* * Physical port will do most of the abort and recovery work. 
We can * just treat it as a loop down */ if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); } - /* - * To exclusively reset vport, we need to log it out first. Note: this - * control_vp can fail if ISP reset is already issued, this is - * expected, as the vp would be already logged out due to ISP reset. - */ - if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) - qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); - ql_dbg(ql_dbg_taskm, vha, 0x801d, "Scheduling enable of Vport %d.\n", vha->vp_idx); + return qla24xx_enable_vp(vha); } @@ -901,10 +915,8 @@ failed: return 0; } -static void qla_ctrlvp_sp_done(void *s, int res) +static void qla_ctrlvp_sp_done(srb_t *sp, int res) { - struct srb *sp = s; - if (sp->comp) complete(sp->comp); /* don't free sp here. Let the caller do the free */ @@ -934,7 +946,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) sp = qla2x00_get_sp(base_vha, NULL, GFP_KERNEL); if (!sp) - goto done; + return rval; sp->type = SRB_CTRL_VP; sp->name = "ctrl_vp"; @@ -950,7 +962,7 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) ql_dbg(ql_dbg_async, vha, 0xffff, "%s: %s Failed submission. %x.\n", __func__, sp->name, rval); - goto done_free_sp; + goto done; } ql_dbg(ql_dbg_vport, vha, 0x113f, "%s hndl %x submitted\n", @@ -968,16 +980,13 @@ int qla24xx_control_vp(scsi_qla_host_t *vha, int cmd) case QLA_SUCCESS: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s done.\n", __func__, sp->name); - goto done_free_sp; + break; default: ql_dbg(ql_dbg_vport, vha, 0xffff, "%s: %s Failed. %x.\n", __func__, sp->name, rval); - goto done_free_sp; + break; } done: - return rval; - -done_free_sp: sp->free(sp); return rval; } diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c index 942ee13b96a4..cad1fc2a1b28 100644 --- a/drivers/scsi/qla2xxx/qla_mr.c +++ b/drivers/scsi/qla2xxx/qla_mr.c @@ -10,7 +10,6 @@ #include <linux/pci.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> -#include <linux/bsg-lib.h> #include <scsi/scsi_tcq.h> #include <linux/utsname.h> @@ -149,7 +148,8 @@ qlafx00_mailbox_command(scsi_qla_host_t *vha, struct mbx_cmd_32 *mcp) QLAFX00_SET_HST_INTR(ha, ha->mbx_intr_code); spin_unlock_irqrestore(&ha->hardware_lock, flags); - wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ); + WARN_ON_ONCE(wait_for_completion_timeout(&ha->mbx_intr_comp, + mcp->tov * HZ) != 0); } else { ql_dbg(ql_dbg_mbx, vha, 0x112c, "Cmd=%x Polling Mode.\n", command); @@ -688,14 +688,12 @@ qlafx00_config_rings(struct scsi_qla_host *vha) } char * -qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str) +qlafx00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) { struct qla_hw_data *ha = vha->hw; - if (pci_is_pcie(ha->pdev)) { - strcpy(str, "PCIe iSA"); - return str; - } + if (pci_is_pcie(ha->pdev)) + strlcpy(str, "PCIe iSA", str_len); return str; } @@ -791,7 +789,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha) } ha->cregbase = - ioremap_nocache(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); + ioremap(pci_resource_start(ha->pdev, 0), BAR0_LEN_FX00); if (!ha->cregbase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x0128, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); @@ -812,7 +810,7 @@ qlafx00_iospace_config(struct qla_hw_data *ha) } ha->iobase = - ioremap_nocache(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); + 
ioremap(pci_resource_start(ha->pdev, 2), BAR2_LEN_FX00); if (!ha->iobase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x012b, "cannot remap MMIO (%s), aborting\n", pci_name(ha->pdev)); @@ -1212,9 +1210,9 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha, " Existing TGT-ID %x did not get " " offline event from firmware.\n", fcport->old_tgt_id); - qla2x00_mark_device_lost(vha, fcport, 0, 0); + qla2x00_mark_device_lost(vha, fcport, 0); set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); return rval; } break; @@ -1232,7 +1230,7 @@ qlafx00_find_all_targets(scsi_qla_host_t *vha, return QLA_MEMORY_ALLOC_FAILED; } - kfree(new_fcport); + qla2x00_free_fcport(new_fcport); return rval; } @@ -1276,7 +1274,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha) if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) { if (fcport->port_type != FCT_INITIATOR) - qla2x00_mark_device_lost(vha, fcport, 0, 0); + qla2x00_mark_device_lost(vha, fcport, 0); } } @@ -1300,7 +1298,7 @@ qlafx00_configure_all_targets(scsi_qla_host_t *vha) /* Free all new device structures not processed. */ list_for_each_entry_safe(fcport, rmptemp, &new_fcports, list) { list_del(&fcport->list); - kfree(fcport); + qla2x00_free_fcport(fcport); } return rval; @@ -1708,7 +1706,7 @@ qlafx00_tgt_detach(struct scsi_qla_host *vha, int tgt_id) if (!fcport) return; - qla2x00_mark_device_lost(vha, fcport, 0, 0); + qla2x00_mark_device_lost(vha, fcport, 0); return; } @@ -1742,7 +1740,7 @@ qlafx00_process_aen(struct scsi_qla_host *vha, struct qla_work_evt *evt) set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); } else if (evt->u.aenfx.mbx[2] == 2) { vha->device_flags |= DFLG_NO_CABLE; - qla2x00_mark_all_devices_lost(vha, 1); + qla2x00_mark_all_devices_lost(vha); } } break; @@ -1799,10 +1797,8 @@ qla2x00_fxdisc_iocb_timeout(void *data) complete(&lio->u.fxiocb.fxiocb_comp); } -static void -qla2x00_fxdisc_sp_done(void *ptr, int res) +static void qla2x00_fxdisc_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; struct srb_iocb *lio = &sp->u.iocb_cmd; complete(&lio->u.fxiocb.fxiocb_comp); @@ -1881,22 +1877,22 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) phost_info = &preg_hsi->hsi; memset(preg_hsi, 0, sizeof(struct register_host_info)); phost_info->os_type = OS_TYPE_LINUX; - strncpy(phost_info->sysname, - p_sysid->sysname, SYSNAME_LENGTH); - strncpy(phost_info->nodename, - p_sysid->nodename, NODENAME_LENGTH); + strlcpy(phost_info->sysname, p_sysid->sysname, + sizeof(phost_info->sysname)); + strlcpy(phost_info->nodename, p_sysid->nodename, + sizeof(phost_info->nodename)); if (!strcmp(phost_info->nodename, "(none)")) ha->mr.host_info_resend = true; - strncpy(phost_info->release, - p_sysid->release, RELEASE_LENGTH); - strncpy(phost_info->version, - p_sysid->version, VERSION_LENGTH); - strncpy(phost_info->machine, - p_sysid->machine, MACHINE_LENGTH); - strncpy(phost_info->domainname, - p_sysid->domainname, DOMNAME_LENGTH); - strncpy(phost_info->hostdriver, - QLA2XXX_VERSION, VERSION_LENGTH); + strlcpy(phost_info->release, p_sysid->release, + sizeof(phost_info->release)); + strlcpy(phost_info->version, p_sysid->version, + sizeof(phost_info->version)); + strlcpy(phost_info->machine, p_sysid->machine, + sizeof(phost_info->machine)); + strlcpy(phost_info->domainname, p_sysid->domainname, + sizeof(phost_info->domainname)); + strlcpy(phost_info->hostdriver, QLA2XXX_VERSION, + sizeof(phost_info->hostdriver)); preg_hsi->utc = (uint64_t)ktime_get_real_seconds(); ql_dbg(ql_dbg_init, vha, 0x0149, "ISP%04X: 
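
The strncpy()-to-strlcpy() conversions above bound each copy by the destination field's size and guarantee NUL termination. A standalone userspace sketch of the difference; strlcpy is a BSD/kernel API not in glibc, so a local equivalent is defined, and the struct and field names are illustrative.

    #include <stdio.h>
    #include <string.h>

    /* Local stand-in for strlcpy(): always NUL-terminates and is bounded
     * by the destination size, unlike strncpy(). */
    static size_t my_strlcpy(char *dst, const char *src, size_t dsize)
    {
            size_t len = strlen(src);

            if (dsize) {
                    size_t n = (len >= dsize) ? dsize - 1 : len;

                    memcpy(dst, src, n);
                    dst[n] = '\0';  /* termination is guaranteed */
            }
            return len;     /* length the caller tried to create */
    }

    struct host_info {
            char nodename[16];      /* illustrative, not the driver struct */
    };

    int main(void)
    {
            struct host_info hi;

            /* the bound comes from the destination, as in the hunks above */
            my_strlcpy(hi.nodename, "a-very-long-node-name",
                       sizeof(hi.nodename));
            printf("%s\n", hi.nodename);    /* truncated but NUL-terminated */
            return 0;
    }
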
Host registration with firmware\n", @@ -1941,8 +1937,10 @@ qlafx00_fx_disc(scsi_qla_host_t *vha, fc_port_t *fcport, uint16_t fx_type) if (fx_type == FXDISC_GET_CONFIG_INFO) { struct config_info_data *pinfo = (struct config_info_data *) fdisc->u.fxiocb.rsp_addr; - strcpy(vha->hw->model_number, pinfo->model_num); - strcpy(vha->hw->model_desc, pinfo->model_description); + strlcpy(vha->hw->model_number, pinfo->model_num, + ARRAY_SIZE(vha->hw->model_number)); + strlcpy(vha->hw->model_desc, pinfo->model_description, + ARRAY_SIZE(vha->hw->model_desc)); memcpy(&vha->hw->mr.symbolic_name, pinfo->symbolic_name, sizeof(vha->hw->mr.symbolic_name)); memcpy(&vha->hw->mr.serial_num, pinfo->serial_num, @@ -2515,7 +2513,7 @@ check_scsi_status: atomic_read(&fcport->state)); if (atomic_read(&fcport->state) == FCS_ONLINE) - qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1); + qla2x00_mark_device_lost(fcport->vha, fcport, 1); break; case CS_ABORTED: @@ -2541,6 +2539,8 @@ check_scsi_status: if (rsp->status_srb == NULL) sp->done(sp, res); + else + WARN_ON_ONCE(true); } /** @@ -2618,6 +2618,8 @@ qlafx00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt) if (sense_len == 0) { rsp->status_srb = NULL; sp->done(sp, cp->result); + } else { + WARN_ON_ONCE(true); } } @@ -3073,7 +3075,6 @@ qlafx00_start_scsi(srb_t *sp) { int nseg; unsigned long flags; - uint32_t index; uint32_t handle; uint16_t cnt; uint16_t req_cnt; @@ -3097,16 +3098,8 @@ qlafx00_start_scsi(srb_t *sp) /* Acquire ring specific lock */ spin_lock_irqsave(&ha->hardware_lock, flags); - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - if (index == req->num_outstanding_cmds) + handle = qla2xxx_get_next_handle(req); + if (handle == 0) goto queuing_error; /* Map the sg table so we have an accurate count of sg entries needed */ diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c index 963094b3c300..bfcd02fdf2b8 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.c +++ b/drivers/scsi/qla2xxx/qla_nvme.c @@ -180,10 +180,9 @@ static void qla_nvme_ls_complete(struct work_struct *work) kref_put(&priv->sp->cmd_kref, qla_nvme_release_ls_cmd_kref); } -static void qla_nvme_sp_ls_done(void *ptr, int res) +static void qla_nvme_sp_ls_done(srb_t *sp, int res) { - srb_t *sp = ptr; - struct nvme_private *priv; + struct nvme_private *priv = sp->priv; if (WARN_ON_ONCE(kref_read(&sp->cmd_kref) == 0)) return; @@ -191,17 +190,15 @@ static void qla_nvme_sp_ls_done(void *ptr, int res) if (res) res = -EINVAL; - priv = (struct nvme_private *)sp->priv; priv->comp_status = res; INIT_WORK(&priv->ls_work, qla_nvme_ls_complete); schedule_work(&priv->ls_work); } /* it is assumed that QPair lock is held. 
*/ -static void qla_nvme_sp_done(void *ptr, int res) +static void qla_nvme_sp_done(srb_t *sp, int res) { - srb_t *sp = ptr; - struct nvme_private *priv = (struct nvme_private *)sp->priv; + struct nvme_private *priv = sp->priv; priv->comp_status = res; kref_put(&sp->cmd_kref, qla_nvme_release_fcp_cmd_kref); @@ -222,13 +219,13 @@ static void qla_nvme_abort_work(struct work_struct *work) "%s called for sp=%p, hndl=%x on fcport=%p deleted=%d\n", __func__, sp, sp->handle, fcport, fcport->deleted); - if (!ha->flags.fw_started && (fcport && fcport->deleted)) + if (!ha->flags.fw_started && fcport->deleted) goto out; if (ha->flags.host_shutting_down) { ql_log(ql_log_info, sp->fcport->vha, 0xffff, - "%s Calling done on sp: %p, type: 0x%x, sp->ref_count: 0x%x\n", - __func__, sp, sp->type, atomic_read(&sp->ref_count)); + "%s Calling done on sp: %p, type: 0x%x\n", + __func__, sp, sp->type); sp->done(sp, 0); goto out; } @@ -267,7 +264,6 @@ static void qla_nvme_ls_abort(struct nvme_fc_local_port *lport, schedule_work(&priv->abort_work); } - static int qla_nvme_ls_req(struct nvme_fc_local_port *lport, struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd) { @@ -357,7 +353,6 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) { unsigned long flags; uint32_t *clr_ptr; - uint32_t index; uint32_t handle; struct cmd_nvme *cmd_pkt; uint16_t cnt, i; @@ -381,17 +376,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp) /* Acquire qpair specific lock */ spin_lock_irqsave(&qpair->qp_lock, flags); - /* Check for room in outstanding command list. */ - handle = req->current_outstanding_cmd; - for (index = 1; index < req->num_outstanding_cmds; index++) { - handle++; - if (handle == req->num_outstanding_cmds) - handle = 1; - if (!req->outstanding_cmds[handle]) - break; - } - - if (index == req->num_outstanding_cmds) { + handle = qla2xxx_get_next_handle(req); + if (handle == 0) { rval = -EBUSY; goto queuing_error; } @@ -624,6 +610,7 @@ static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport) } static struct nvme_fc_port_template qla_nvme_fc_transport = { + .module = THIS_MODULE, .localport_delete = qla_nvme_localport_delete, .remoteport_delete = qla_nvme_remoteport_delete, .create_queue = qla_nvme_alloc_queue, @@ -653,7 +640,9 @@ void qla_nvme_unregister_remote_port(struct fc_port *fcport) "%s: unregister remoteport on %p %8phN\n", __func__, fcport, fcport->port_name); - nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); + if (test_bit(PFLG_DRIVER_REMOVING, &fcport->vha->pci_flags)) + nvme_fc_set_remoteport_devloss(fcport->nvme_remote_port, 0); + init_completion(&fcport->nvme_del_done); ret = nvme_fc_unregister_remoteport(fcport->nvme_remote_port); if (ret) diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h index 67bb4a2a3742..ef912902d4e5 100644 --- a/drivers/scsi/qla2xxx/qla_nvme.h +++ b/drivers/scsi/qla2xxx/qla_nvme.h @@ -7,7 +7,6 @@ #ifndef __QLA_NVME_H #define __QLA_NVME_H -#include <linux/blk-mq.h> #include <uapi/scsi/fc/fc_fs.h> #include <uapi/scsi/fc/fc_els.h> #include <linux/nvme-fc-driver.h> @@ -119,7 +118,7 @@ struct pt_ls4_rx_unsol { uint32_t exchange_address; uint8_t d_id[3]; uint8_t r_ctl; - uint8_t s_id[3]; + be_id_t s_id; uint8_t cs_ctl; uint8_t f_ctl[3]; uint8_t type; @@ -144,5 +143,5 @@ int qla_nvme_register_remote(struct scsi_qla_host *, struct fc_port *); void qla_nvme_delete(struct scsi_qla_host *); void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *, struct pt_ls4_request *, struct req_que *); -void qla24xx_async_gffid_sp_done(void *, 
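
Both the qlafx00_start_scsi() and qla2x00_start_nvme_mq() hunks here replace the same open-coded scan of the outstanding-command table with qla2xxx_get_next_handle(). A pared-down standalone sketch of the logic that helper centralizes; the real helper lives in the driver and may also record the handle it hands out.

    #include <stdint.h>
    #include <stdio.h>

    /* Reduced request queue; the real struct req_que has much more. */
    struct req_que {
            uint32_t current_outstanding_cmd;
            uint32_t num_outstanding_cmds;
            void *outstanding_cmds[8];
    };

    /* Scan for a free slot, starting after the last handle handed out.
     * Handle 0 is reserved to mean "table full", matching the new
     * "if (handle == 0) goto queuing_error;" callers above. */
    static uint32_t get_next_handle(struct req_que *req)
    {
            uint32_t index, handle = req->current_outstanding_cmd;

            for (index = 1; index < req->num_outstanding_cmds; index++) {
                    handle++;
                    if (handle == req->num_outstanding_cmds)
                            handle = 1;     /* wrap, skipping reserved slot 0 */
                    if (!req->outstanding_cmds[handle])
                            return handle;
            }
            return 0;       /* every slot is busy */
    }

    int main(void)
    {
            struct req_que req = {
                    .current_outstanding_cmd = 3,
                    .num_outstanding_cmds = 8,
            };

            printf("next handle: %u\n", get_next_handle(&req));     /* 4 */
            return 0;
    }
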
int); +void qla24xx_async_gffid_sp_done(struct srb *sp, int); #endif diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index c760ae354174..185c5f34d4c1 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -1612,8 +1612,7 @@ qla82xx_get_bootld_offset(struct qla_hw_data *ha) return (u8 *)&ha->hablob->fw->data[offset]; } -static __le32 -qla82xx_get_fw_size(struct qla_hw_data *ha) +static u32 qla82xx_get_fw_size(struct qla_hw_data *ha) { struct qla82xx_uri_data_desc *uri_desc = NULL; @@ -1624,7 +1623,7 @@ qla82xx_get_fw_size(struct qla_hw_data *ha) return cpu_to_le32(uri_desc->size); } - return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]); + return get_unaligned_le32(&ha->hablob->fw->data[FW_SIZE_OFFSET]); } static u8 * @@ -1816,7 +1815,7 @@ qla82xx_fw_load_from_blob(struct qla_hw_data *ha) } flashaddr = FLASH_ADDR_START; - size = (__force u32)qla82xx_get_fw_size(ha) / 8; + size = qla82xx_get_fw_size(ha) / 8; ptr64 = (u64 *)qla82xx_get_fw_offs(ha); for (i = 0; i < size; i++) { @@ -1883,7 +1882,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha) static int qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) { - __le32 val; + uint32_t val; uint32_t min_size; struct qla_hw_data *ha = vha->hw; const struct firmware *fw = ha->hablob->fw; @@ -1896,8 +1895,8 @@ qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) min_size = QLA82XX_URI_FW_MIN_SIZE; } else { - val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]); - if ((__force u32)val != QLA82XX_BDINFO_MAGIC) + val = get_unaligned_le32(&fw->data[QLA82XX_FW_MAGIC_OFFSET]); + if (val != QLA82XX_BDINFO_MAGIC) return -EINVAL; min_size = QLA82XX_FW_MIN_SIZE; @@ -1977,7 +1976,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) } while (--retries); ql_log(ql_log_fatal, vha, 0x00ac, - "Rcv Peg initializatin failed: 0x%x.\n", val); + "Rcv Peg initialization failed: 0x%x.\n", val); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); @@ -1985,7 +1984,7 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) } /* ISR related functions */ -static struct qla82xx_legacy_intr_set legacy_intr[] = \ +static struct qla82xx_legacy_intr_set legacy_intr[] = QLA82XX_LEGACY_INTR_CONFIG; /* @@ -2287,7 +2286,9 @@ qla82xx_disable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); - qla82xx_mbx_intr_disable(vha); + if (ha->interrupts_on) + qla82xx_mbx_intr_disable(vha); + spin_lock_irq(&ha->hardware_lock); if (IS_QLA8044(ha)) qla8044_wr_reg(ha, LEG_INTR_MASK_OFFSET, 1); @@ -3028,7 +3029,7 @@ qla8xxx_dev_failed_handler(scsi_qla_host_t *vha) /* Set DEV_FAILED flag to disable timer */ vha->device_flags |= DFLG_DEV_FAILED; qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); vha->flags.online = 0; vha->flags.init_done = 0; } @@ -3286,7 +3287,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) case QLA8XXX_DEV_NEED_QUIESCENT: qla82xx_need_qsnt_handler(vha); /* Reset timeout value after quiescence handler */ - dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ + dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_QUIESCENT: @@ -3301,7 +3302,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) qla82xx_idc_lock(ha); /* Reset timeout value after quiescence handler */ - dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\ + dev_init_timeout = jiffies + 
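
The qla_nx.c hunks above swap cpu_to_le32(*(u32 *)&fw->data[off]) for get_unaligned_le32(), which removes a potentially unaligned dereference and makes the byte order explicit. A userspace sketch of the equivalent byte-wise load:

    #include <stdint.h>
    #include <stdio.h>

    /* Byte-wise little-endian load, analogous to the kernel's
     * get_unaligned_le32(): safe at any alignment on any architecture. */
    static uint32_t get_le32(const uint8_t *p)
    {
            return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
                   ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    }

    int main(void)
    {
            /* pretend this is fw->data[] at some odd offset */
            const uint8_t blob[] = { 0xef, 0xbe, 0xad, 0xde };

            printf("0x%08x\n", get_le32(blob));     /* prints 0xdeadbeef */
            return 0;
    }
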
(ha->fcoe_dev_init_timeout * HZ); break; case QLA8XXX_DEV_FAILED: @@ -3686,7 +3687,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - if ((!sp->u.scmd.ctx || + if ((!sp->u.scmd.crc_ctx || (sp->flags & SRB_FCP_CMND_DMA_VALID)) && !ha->flags.isp82xx_fw_hung) { @@ -3710,10 +3711,12 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) /* Wait for pending cmds (physical and virtual) to complete */ if (qla2x00_eh_wait_for_pending_commands(vha, 0, 0, - WAIT_HOST)) { + WAIT_HOST) == QLA_SUCCESS) { ql_dbg(ql_dbg_init, vha, 0x00b3, "Done wait for " "pending commands.\n"); + } else { + WARN_ON_ONCE(true); } } } @@ -4232,7 +4235,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha) goto md_failed; } - entry_hdr = (qla82xx_md_entry_hdr_t *) \ + entry_hdr = (qla82xx_md_entry_hdr_t *) (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); /* Walk through the entry headers */ @@ -4339,7 +4342,7 @@ qla82xx_md_collect(scsi_qla_host_t *vha) data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->md_dump; skip_nxt_entry: - entry_hdr = (qla82xx_md_entry_hdr_t *) \ + entry_hdr = (qla82xx_md_entry_hdr_t *) (((uint8_t *)entry_hdr) + entry_hdr->entry_size); } diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 3c7beef92c35..230abee10598 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -486,13 +486,13 @@ #define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) #define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) -#define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 -#define QLA82XX_PCI_DIRECT_CRB (unsigned long)0x04400000 -#define QLA82XX_PCI_CAMQM (unsigned long)0x04800000 -#define QLA82XX_PCI_CAMQM_MAX (unsigned long)0x04ffffff -#define QLA82XX_PCI_DDR_NET (unsigned long)0x00000000 -#define QLA82XX_PCI_QDR_NET (unsigned long)0x04000000 -#define QLA82XX_PCI_QDR_NET_MAX (unsigned long)0x043fffff +#define QLA82XX_PCI_CRBSPACE 0x06000000UL +#define QLA82XX_PCI_DIRECT_CRB 0x04400000UL +#define QLA82XX_PCI_CAMQM 0x04800000UL +#define QLA82XX_PCI_CAMQM_MAX 0x04ffffffUL +#define QLA82XX_PCI_DDR_NET 0x00000000UL +#define QLA82XX_PCI_QDR_NET 0x04000000UL +#define QLA82XX_PCI_QDR_NET_MAX 0x043fffffUL /* * Register offsets for MN diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c index 369ac04d0454..c056f466f1f4 100644 --- a/drivers/scsi/qla2xxx/qla_nx2.c +++ b/drivers/scsi/qla2xxx/qla_nx2.c @@ -2810,7 +2810,7 @@ error: #define ISP8044_PEX_DMA_ENGINE_INDEX 8 #define ISP8044_PEX_DMA_BASE_ADDRESS 0x77320000 -#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000 +#define ISP8044_PEX_DMA_NUM_OFFSET 0x10000UL #define ISP8044_PEX_DMA_CMD_ADDR_LOW 0x0 #define ISP8044_PEX_DMA_CMD_ADDR_HIGH 0x04 #define ISP8044_PEX_DMA_CMD_STS_AND_CNTRL 0x08 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 2e58cff9d200..b520a980d1dc 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -69,7 +69,7 @@ MODULE_PARM_DESC(ql2xplogiabsentdevice, "a Fabric scan. This is needed for several broken switches. " "Default is 0 - no PLOGI. 1 - perform PLOGI."); -int ql2xloginretrycount = 0; +int ql2xloginretrycount; module_param(ql2xloginretrycount, int, S_IRUGO); MODULE_PARM_DESC(ql2xloginretrycount, "Specify an alternate value for the NVRAM login retry count."); @@ -234,7 +234,7 @@ MODULE_PARM_DESC(ql2xmdenable, "0 - MiniDump disabled. 
" "1 (Default) - MiniDump enabled."); -int ql2xexlogins = 0; +int ql2xexlogins; module_param(ql2xexlogins, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xexlogins, "Number of extended Logins. " @@ -250,7 +250,7 @@ module_param(ql2xiniexchg, uint, 0644); MODULE_PARM_DESC(ql2xiniexchg, "Number of initiator exchanges."); -int ql2xfwholdabts = 0; +int ql2xfwholdabts; module_param(ql2xfwholdabts, int, S_IRUGO); MODULE_PARM_DESC(ql2xfwholdabts, "Allow FW to hold status IOCB until ABTS rsp received. " @@ -536,80 +536,70 @@ static void qla2x00_free_queues(struct qla_hw_data *ha) } static char * -qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str) +qla2x00_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) { struct qla_hw_data *ha = vha->hw; - static char *pci_bus_modes[] = { + static const char *const pci_bus_modes[] = { "33", "66", "100", "133", }; uint16_t pci_bus; - strcpy(str, "PCI"); pci_bus = (ha->pci_attr & (BIT_9 | BIT_10)) >> 9; if (pci_bus) { - strcat(str, "-X ("); - strcat(str, pci_bus_modes[pci_bus]); + snprintf(str, str_len, "PCI-X (%s MHz)", + pci_bus_modes[pci_bus]); } else { pci_bus = (ha->pci_attr & BIT_8) >> 8; - strcat(str, " ("); - strcat(str, pci_bus_modes[pci_bus]); + snprintf(str, str_len, "PCI (%s MHz)", pci_bus_modes[pci_bus]); } - strcat(str, " MHz)"); - return (str); + return str; } static char * -qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str) +qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str, size_t str_len) { - static char *pci_bus_modes[] = { "33", "66", "100", "133", }; + static const char *const pci_bus_modes[] = { + "33", "66", "100", "133", + }; struct qla_hw_data *ha = vha->hw; uint32_t pci_bus; if (pci_is_pcie(ha->pdev)) { - char lwstr[6]; uint32_t lstat, lspeed, lwidth; + const char *speed_str; pcie_capability_read_dword(ha->pdev, PCI_EXP_LNKCAP, &lstat); lspeed = lstat & PCI_EXP_LNKCAP_SLS; lwidth = (lstat & PCI_EXP_LNKCAP_MLW) >> 4; - strcpy(str, "PCIe ("); switch (lspeed) { case 1: - strcat(str, "2.5GT/s "); + speed_str = "2.5GT/s"; break; case 2: - strcat(str, "5.0GT/s "); + speed_str = "5.0GT/s"; break; case 3: - strcat(str, "8.0GT/s "); + speed_str = "8.0GT/s"; break; default: - strcat(str, "<unknown> "); + speed_str = "<unknown>"; break; } - snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth); - strcat(str, lwstr); + snprintf(str, str_len, "PCIe (%s x%d)", speed_str, lwidth); return str; } - strcpy(str, "PCI"); pci_bus = (ha->pci_attr & CSRX_PCIX_BUS_MODE_MASK) >> 8; - if (pci_bus == 0 || pci_bus == 8) { - strcat(str, " ("); - strcat(str, pci_bus_modes[pci_bus >> 3]); - } else { - strcat(str, "-X "); - if (pci_bus & BIT_2) - strcat(str, "Mode 2"); - else - strcat(str, "Mode 1"); - strcat(str, " ("); - strcat(str, pci_bus_modes[pci_bus & ~BIT_2]); - } - strcat(str, " MHz)"); + if (pci_bus == 0 || pci_bus == 8) + snprintf(str, str_len, "PCI (%s MHz)", + pci_bus_modes[pci_bus >> 3]); + else + snprintf(str, str_len, "PCI-X Mode %d (%s MHz)", + pci_bus & 4 ? 
2 : 1, + pci_bus_modes[pci_bus & 3]); return str; } @@ -662,13 +652,10 @@ qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size) return str; } -void -qla2x00_sp_free_dma(void *ptr) +void qla2x00_sp_free_dma(srb_t *sp) { - srb_t *sp = ptr; struct qla_hw_data *ha = sp->vha->hw; struct scsi_cmnd *cmd = GET_CMD_SP(sp); - void *ctx = GET_CMD_CTX_SP(sp); if (sp->flags & SRB_DMA_VALID) { scsi_dma_unmap(cmd); @@ -681,24 +668,21 @@ qla2x00_sp_free_dma(void *ptr) sp->flags &= ~SRB_CRC_PROT_DMA_VALID; } - if (!ctx) - return; - if (sp->flags & SRB_CRC_CTX_DSD_VALID) { /* List assured to be having elements */ - qla2x00_clean_dsd_pool(ha, ctx); + qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); sp->flags &= ~SRB_CRC_CTX_DSD_VALID; } if (sp->flags & SRB_CRC_CTX_DMA_VALID) { - struct crc_context *ctx0 = ctx; + struct crc_context *ctx0 = sp->u.scmd.crc_ctx; dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); sp->flags &= ~SRB_CRC_CTX_DMA_VALID; } if (sp->flags & SRB_FCP_CMND_DMA_VALID) { - struct ct6_dsd *ctx1 = ctx; + struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx; dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, ctx1->fcp_cmnd_dma); @@ -709,34 +693,23 @@ qla2x00_sp_free_dma(void *ptr) } } -void -qla2x00_sp_compl(void *ptr, int res) +void qla2x00_sp_compl(srb_t *sp, int res) { - srb_t *sp = ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct completion *comp = sp->comp; - if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0)) - return; - - atomic_dec(&sp->ref_count); - sp->free(sp); cmd->result = res; CMD_SP(cmd) = NULL; cmd->scsi_done(cmd); if (comp) complete(comp); - qla2x00_rel_sp(sp); } -void -qla2xxx_qpair_sp_free_dma(void *ptr) +void qla2xxx_qpair_sp_free_dma(srb_t *sp) { - srb_t *sp = (srb_t *)ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct qla_hw_data *ha = sp->fcport->vha->hw; - void *ctx = GET_CMD_CTX_SP(sp); if (sp->flags & SRB_DMA_VALID) { scsi_dma_unmap(cmd); @@ -749,17 +722,14 @@ qla2xxx_qpair_sp_free_dma(void *ptr) sp->flags &= ~SRB_CRC_PROT_DMA_VALID; } - if (!ctx) - return; - if (sp->flags & SRB_CRC_CTX_DSD_VALID) { /* List assured to be having elements */ - qla2x00_clean_dsd_pool(ha, ctx); + qla2x00_clean_dsd_pool(ha, sp->u.scmd.crc_ctx); sp->flags &= ~SRB_CRC_CTX_DSD_VALID; } if (sp->flags & SRB_DIF_BUNDL_DMA_VALID) { - struct crc_context *difctx = ctx; + struct crc_context *difctx = sp->u.scmd.crc_ctx; struct dsd_dma *dif_dsd, *nxt_dsd; list_for_each_entry_safe(dif_dsd, nxt_dsd, @@ -795,7 +765,7 @@ qla2xxx_qpair_sp_free_dma(void *ptr) } if (sp->flags & SRB_FCP_CMND_DMA_VALID) { - struct ct6_dsd *ctx1 = ctx; + struct ct6_dsd *ctx1 = sp->u.scmd.ct6_ctx; dma_pool_free(ha->fcp_cmnd_dma_pool, ctx1->fcp_cmnd, ctx1->fcp_cmnd_dma); @@ -807,32 +777,24 @@ qla2xxx_qpair_sp_free_dma(void *ptr) } if (sp->flags & SRB_CRC_CTX_DMA_VALID) { - struct crc_context *ctx0 = ctx; + struct crc_context *ctx0 = sp->u.scmd.crc_ctx; - dma_pool_free(ha->dl_dma_pool, ctx, ctx0->crc_ctx_dma); + dma_pool_free(ha->dl_dma_pool, ctx0, ctx0->crc_ctx_dma); sp->flags &= ~SRB_CRC_CTX_DMA_VALID; } } -void -qla2xxx_qpair_sp_compl(void *ptr, int res) +void qla2xxx_qpair_sp_compl(srb_t *sp, int res) { - srb_t *sp = ptr; struct scsi_cmnd *cmd = GET_CMD_SP(sp); struct completion *comp = sp->comp; - if (WARN_ON_ONCE(atomic_read(&sp->ref_count) == 0)) - return; - - atomic_dec(&sp->ref_count); - sp->free(sp); cmd->result = res; CMD_SP(cmd) = NULL; cmd->scsi_done(cmd); if (comp) complete(comp); - qla2xxx_rel_qpair_sp(sp->qpair, sp); } static int @@ -845,9 +807,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, 
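
The pci_info_str() rework above replaces unbounded strcpy()/strcat() chains with single snprintf() calls limited by the new str_len argument. A standalone sketch of the resulting shape; the pci_bus value is a placeholder for the bits decoded from ha->pci_attr.

    #include <stdio.h>

    static char *pci_info_str(char *str, size_t str_len)
    {
            static const char *const pci_bus_modes[] = {
                    "33", "66", "100", "133",
            };
            unsigned int pci_bus = 6;       /* placeholder decode result */

            /* one bounded write per branch, no concatenation chain */
            if (pci_bus == 0 || pci_bus == 8)
                    snprintf(str, str_len, "PCI (%s MHz)",
                             pci_bus_modes[pci_bus >> 3]);
            else
                    snprintf(str, str_len, "PCI-X Mode %d (%s MHz)",
                             (pci_bus & 4) ? 2 : 1,
                             pci_bus_modes[pci_bus & 3]);
            return str;
    }

    int main(void)
    {
            char buf[32];

            puts(pci_info_str(buf, sizeof(buf)));   /* PCI-X Mode 2 (100 MHz) */
            return 0;
    }
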
struct scsi_cmnd *cmd) struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); srb_t *sp; int rval; - struct qla_qpair *qpair = NULL; - uint32_t tag; - uint16_t hwq; if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)) || WARN_ON_ONCE(!rport)) { @@ -856,6 +815,10 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) } if (ha->mqenable) { + uint32_t tag; + uint16_t hwq; + struct qla_qpair *qpair = NULL; + tag = blk_mq_unique_tag(cmd->request); hwq = blk_mq_unique_tag_to_hwq(tag); qpair = ha->queue_pair_map[hwq]; @@ -925,13 +888,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) else goto qc24_target_busy; - sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC); - if (!sp) - goto qc24_host_busy; + sp = scsi_cmd_priv(cmd); + qla2xxx_init_sp(sp, vha, vha->hw->base_qpair, fcport); sp->u.scmd.cmd = cmd; sp->type = SRB_SCSI_CMD; - atomic_set(&sp->ref_count, 1); + CMD_SP(cmd) = (void *)sp; sp->free = qla2x00_sp_free_dma; sp->done = qla2x00_sp_compl; @@ -948,9 +910,6 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) qc24_host_busy_free_sp: sp->free(sp); -qc24_host_busy: - return SCSI_MLQUEUE_HOST_BUSY; - qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; @@ -1011,24 +970,21 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, else goto qc24_target_busy; - sp = qla2xxx_get_qpair_sp(vha, qpair, fcport, GFP_ATOMIC); - if (!sp) - goto qc24_host_busy; + sp = scsi_cmd_priv(cmd); + qla2xxx_init_sp(sp, vha, qpair, fcport); sp->u.scmd.cmd = cmd; sp->type = SRB_SCSI_CMD; - atomic_set(&sp->ref_count, 1); CMD_SP(cmd) = (void *)sp; sp->free = qla2xxx_qpair_sp_free_dma; sp->done = qla2xxx_qpair_sp_compl; - sp->qpair = qpair; rval = ha->isp_ops->start_scsi_mq(sp); if (rval != QLA_SUCCESS) { ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3078, "Start scsi failed rval=%d for cmd=%p.\n", rval, cmd); if (rval == QLA_INTERFACE_ERROR) - goto qc24_fail_command; + goto qc24_free_sp_fail_command; goto qc24_host_busy_free_sp; } @@ -1037,12 +993,14 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd, qc24_host_busy_free_sp: sp->free(sp); -qc24_host_busy: - return SCSI_MLQUEUE_HOST_BUSY; - qc24_target_busy: return SCSI_MLQUEUE_TARGET_BUSY; +qc24_free_sp_fail_command: + sp->free(sp); + CMD_SP(cmd) = NULL; + qla2xxx_rel_qpair_sp(sp->qpair, sp); + qc24_fail_command: cmd->scsi_done(cmd); @@ -1058,8 +1016,8 @@ qc24_fail_command: * cmd = Scsi Command to wait on. 
* * Return: - * Not Found : 0 - * Found : 1 + * Completed in time : QLA_SUCCESS + * Did not complete in time : QLA_FUNCTION_FAILED */ static int qla2x00_eh_wait_on_command(struct scsi_cmnd *cmd) @@ -1150,9 +1108,17 @@ static inline int test_fcport_count(scsi_qla_host_t *vha) void qla2x00_wait_for_sess_deletion(scsi_qla_host_t *vha) { - qla2x00_mark_all_devices_lost(vha, 0); + u8 i; + + qla2x00_mark_all_devices_lost(vha); + + for (i = 0; i < 10; i++) { + if (wait_event_timeout(vha->fcport_waitQ, + test_fcport_count(vha), HZ) > 0) + break; + } - wait_event_timeout(vha->fcport_waitQ, test_fcport_count(vha), 10*HZ); + flush_workqueue(vha->hw->wq); } /* @@ -1211,16 +1177,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha) return return_status; } -static int -sp_get(struct srb *sp) -{ - if (!refcount_inc_not_zero((refcount_t *)&sp->ref_count)) - /* kref get fail */ - return ENXIO; - else - return 0; -} - #define ISP_REG_DISCONNECT 0xffffffffU /************************************************************************** * qla2x00_isp_reg_stat @@ -1269,14 +1225,16 @@ static int qla2xxx_eh_abort(struct scsi_cmnd *cmd) { scsi_qla_host_t *vha = shost_priv(cmd->device->host); + DECLARE_COMPLETION_ONSTACK(comp); srb_t *sp; int ret; unsigned int id; uint64_t lun; - unsigned long flags; int rval; struct qla_hw_data *ha = vha->hw; + uint32_t ratov_j; struct qla_qpair *qpair; + unsigned long flags; if (qla2x00_isp_reg_stat(ha)) { ql_log(ql_log_info, vha, 0x8042, @@ -1288,29 +1246,28 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) if (ret != 0) return ret; - sp = (srb_t *) CMD_SP(cmd); - if (!sp) - return SUCCESS; - + sp = scsi_cmd_priv(cmd); qpair = sp->qpair; - if (!qpair) + + if ((sp->fcport && sp->fcport->deleted) || !qpair) return SUCCESS; spin_lock_irqsave(qpair->qp_lock_ptr, flags); - if (sp->type != SRB_SCSI_CMD || GET_CMD_SP(sp) != cmd) { - /* there's a chance an interrupt could clear - the ptr as part of done & free */ + if (sp->completed) { spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); return SUCCESS; } - if (sp_get(sp)){ - /* ref_count is already 0 */ + if (sp->abort || sp->aborted) { spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - return SUCCESS; + return FAILED; } + + sp->abort = 1; + sp->comp = &comp; spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); + id = cmd->device->id; lun = cmd->device->lun; @@ -1318,28 +1275,37 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) "Aborting from RISC nexus=%ld:%d:%llu sp=%p cmd=%p handle=%x\n", vha->host_no, id, lun, sp, cmd, sp->handle); + /* + * Abort will release the original Command/sp from FW. Let the + * original command call scsi_done. In return, it will wake up + * this sleeping thread. + */ rval = ha->isp_ops->abort_command(sp); + ql_dbg(ql_dbg_taskm, vha, 0x8003, "Abort command mbx cmd=%p, rval=%x.\n", cmd, rval); + /* Wait for the command completion. */ + ratov_j = ha->r_a_tov/10 * 4 * 1000; + ratov_j = msecs_to_jiffies(ratov_j); switch (rval) { case QLA_SUCCESS: - /* - * The command has been aborted. That means that the firmware - * won't report a completion. - */ - sp->done(sp, DID_ABORT << 16); - ret = SUCCESS; + if (!wait_for_completion_timeout(&comp, ratov_j)) { + ql_dbg(ql_dbg_taskm, vha, 0xffff, + "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", + __func__, ha->r_a_tov/10); + ret = FAILED; + } else { + ret = SUCCESS; + } break; default: - /* - * Either abort failed or abort and completion raced. Let - * the SCSI core retry the abort in the former case. 
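
The reworked qla2xxx_eh_abort() here no longer completes an aborted command itself: it flags the srb, issues the abort, and parks on an on-stack completion that the command's own done() callback fires, for up to 4 * R_A_TOV. A condensed, non-verbatim sketch of that flow, reusing the driver types visible in this diff; locking, logging and the finer error states are omitted.

    /* Condensed sketch only; not the full handler. */
    static int eh_abort_wait_sketch(struct qla_hw_data *ha, srb_t *sp)
    {
            DECLARE_COMPLETION_ONSTACK(comp);
            uint32_t ratov_j;
            int ret;

            sp->abort = 1;
            sp->comp = &comp;       /* sp->done() will complete this */

            if (ha->isp_ops->abort_command(sp) != QLA_SUCCESS) {
                    sp->comp = NULL;
                    return FAILED;
            }

            /* ha->r_a_tov is kept in 100 ms units; wait 4 * R_A_TOV */
            ratov_j = msecs_to_jiffies(ha->r_a_tov / 10 * 4 * 1000);
            ret = wait_for_completion_timeout(&comp, ratov_j) ?
                    SUCCESS : FAILED;       /* timeout: FW kept the sp */
            sp->comp = NULL;
            return ret;
    }
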
- */ ret = FAILED; break; } + sp->comp = NULL; + ql_log(ql_log_info, vha, 0x801c, "Abort command issued nexus=%ld:%d:%llu -- %x.\n", vha->host_no, id, lun, ret); @@ -1347,6 +1313,9 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd) return ret; } +/* + * Returns: QLA_SUCCESS or QLA_FUNCTION_FAILED. + */ int qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t, uint64_t l, enum nexus_wait_type type) @@ -1420,6 +1389,9 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type, if (err != 0) return err; + if (fcport->deleted) + return SUCCESS; + ql_log(ql_log_info, vha, 0x8009, "%s RESET ISSUED nexus=%ld:%d:%llu cmd=%p.\n", name, vha->host_no, cmd->device->id, cmd->device->lun, cmd); @@ -1534,6 +1506,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd) return ret; ret = FAILED; + if (qla2x00_chip_is_down(vha)) + return ret; + ql_log(ql_log_info, vha, 0x8012, "BUS RESET ISSUED nexus=%ld:%d:%llu.\n", vha->host_no, id, lun); @@ -1692,7 +1667,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha) if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) { atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); ret = qla2x00_full_login_lip(vha); if (ret != QLA_SUCCESS) { ql_dbg(ql_dbg_taskm, vha, 0x802d, @@ -1722,29 +1697,52 @@ static void qla2x00_abort_srb(struct qla_qpair *qp, srb_t *sp, const int res, scsi_qla_host_t *vha = qp->vha; struct qla_hw_data *ha = vha->hw; int rval; + bool ret_cmd; + uint32_t ratov_j; - if (sp_get(sp)) + if (qla2x00_chip_is_down(vha)) { + sp->done(sp, res); return; + } if (sp->type == SRB_NVME_CMD || sp->type == SRB_NVME_LS || (sp->type == SRB_SCSI_CMD && !ha->flags.eeh_busy && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) && !qla2x00_isp_reg_stat(ha))) { + if (sp->comp) { + sp->done(sp, res); + return; + } + sp->comp = &comp; + sp->abort = 1; spin_unlock_irqrestore(qp->qp_lock_ptr, *flags); - rval = ha->isp_ops->abort_command(sp); + rval = ha->isp_ops->abort_command(sp); + /* Wait for command completion. */ + ret_cmd = false; + ratov_j = ha->r_a_tov/10 * 4 * 1000; + ratov_j = msecs_to_jiffies(ratov_j); switch (rval) { case QLA_SUCCESS: - sp->done(sp, res); + if (!wait_for_completion_timeout(&comp, ratov_j)) { + ql_dbg(ql_dbg_taskm, vha, 0xffff, + "%s: Abort wait timer (4 * R_A_TOV[%d]) expired\n", + __func__, ha->r_a_tov/10); + ret_cmd = true; + } + /* else FW returns SP to driver */ break; - case QLA_FUNCTION_PARAMETER_ERROR: - wait_for_completion(&comp); + default: + ret_cmd = true; break; } spin_lock_irqsave(qp->qp_lock_ptr, *flags); - sp->comp = NULL; + if (ret_cmd && (!sp->completed || !sp->aborted)) sp->done(sp, res); + } else { + sp->done(sp, res); } } @@ -1767,7 +1765,6 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { - req->outstanding_cmds[cnt] = NULL; switch (sp->cmd_type) { case TYPE_SRB: qla2x00_abort_srb(qp, sp, res, &flags); @@ -1789,6 +1786,7 @@ __qla2x00_abort_all_cmds(struct qla_qpair *qp, int res) default: break; } + req->outstanding_cmds[cnt] = NULL; } } spin_unlock_irqrestore(qp->qp_lock_ptr, flags); @@ -1800,8 +1798,13 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) int que; struct qla_hw_data *ha = vha->hw; + /* Continue only if initialization complete. 
*/ + if (!ha->base_qpair) + return; __qla2x00_abort_all_cmds(ha->base_qpair, res); + if (!ha->queue_pair_map) + return; for (que = 0; que < ha->max_qpairs; que++) { if (!ha->queue_pair_map[que]) continue; @@ -2477,7 +2480,7 @@ static struct isp_operations qla27xx_isp_ops = { .config_rings = qla24xx_config_rings, .reset_adapter = qla24xx_reset_adapter, .nvram_config = qla81xx_nvram_config, - .update_fw_options = qla81xx_update_fw_options, + .update_fw_options = qla24xx_update_fw_options, .load_risc = qla81xx_load_risc, .pci_info_str = qla24xx_pci_info_str, .fw_version_str = qla24xx_fw_version_str, @@ -3154,6 +3157,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) ql_log(ql_log_fatal, base_vha, 0x003d, "Failed to allocate memory for queue pointers..." "aborting.\n"); + ret = -ENODEV; goto probe_failed; } @@ -3232,6 +3236,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out); ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0); + if (unlikely(!ha->wq)) { + ret = -ENOMEM; + goto probe_failed; + } if (ha->isp_ops->initialize_adapter(base_vha)) { ql_log(ql_log_fatal, base_vha, 0x00d6, @@ -3418,7 +3426,8 @@ skip_dpc: "QLogic %s - %s.\n", ha->model_number, ha->model_desc); ql_log(ql_log_info, base_vha, 0x00fc, "ISP%04X: %s @ %s hdma%c host#=%ld fw=%s.\n", - pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info), + pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info, + sizeof(pci_info)), pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-', base_vha->host_no, ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str))); @@ -3440,6 +3449,12 @@ skip_dpc: return 0; probe_failed: + if (base_vha->gnl.l) { + dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, + base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + } + if (base_vha->timer_active) qla2x00_stop_timer(base_vha); base_vha->flags.online = 0; @@ -3487,6 +3502,29 @@ disable_device: return ret; } +static void __qla_set_remove_flag(scsi_qla_host_t *base_vha) +{ + scsi_qla_host_t *vp; + unsigned long flags; + struct qla_hw_data *ha; + + if (!base_vha) + return; + + ha = base_vha->hw; + + spin_lock_irqsave(&ha->vport_slock, flags); + list_for_each_entry(vp, &ha->vp_list, list) + set_bit(PFLG_DRIVER_REMOVING, &vp->pci_flags); + + /* + * Indicate device removal to prevent future board_disable + * and wait until any pending board_disable has completed. + */ + set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); + spin_unlock_irqrestore(&ha->vport_slock, flags); +} + static void qla2x00_shutdown(struct pci_dev *pdev) { @@ -3503,7 +3541,7 @@ qla2x00_shutdown(struct pci_dev *pdev) * Prevent future board_disable and wait * until any pending board_disable has completed. */ - set_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags); + __qla_set_remove_flag(vha); cancel_work_sync(&ha->board_disable); if (!atomic_read(&pdev->enable_cnt)) @@ -3532,6 +3570,10 @@ qla2x00_shutdown(struct pci_dev *pdev) qla2x00_try_to_stop_firmware(vha); } + /* Disable timer */ + if (vha->timer_active) + qla2x00_stop_timer(vha); + /* Turn adapter off line */ vha->flags.online = 0; @@ -3659,10 +3701,7 @@ qla2x00_remove_one(struct pci_dev *pdev) ha = base_vha->hw; ql_log(ql_log_info, base_vha, 0xb079, "Removing driver\n"); - - /* Indicate device removal to prevent future board_disable and wait - * until any pending board_disable has completed. 
*/ - set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags); + __qla_set_remove_flag(base_vha); cancel_work_sync(&ha->board_disable); /* @@ -3673,7 +3712,7 @@ qla2x00_remove_one(struct pci_dev *pdev) if (!atomic_read(&pdev->enable_cnt)) { dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); - + base_vha->gnl.l = NULL; scsi_host_put(base_vha->host); kfree(ha); pci_set_drvdata(pdev, NULL); @@ -3713,6 +3752,8 @@ qla2x00_remove_one(struct pci_dev *pdev) dma_free_coherent(&ha->pdev->dev, base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma); + base_vha->gnl.l = NULL; + vfree(base_vha->scan.l); if (IS_QLAFX00(ha)) @@ -3813,37 +3854,21 @@ void qla2x00_free_fcports(struct scsi_qla_host *vha) } static inline void -qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, - int defer) +qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport) { - struct fc_rport *rport; - scsi_qla_host_t *base_vha; - unsigned long flags; + int now; if (!fcport->rport) return; - rport = fcport->rport; - if (defer) { - base_vha = pci_get_drvdata(vha->hw->pdev); - spin_lock_irqsave(vha->host->host_lock, flags); - fcport->drport = rport; - spin_unlock_irqrestore(vha->host->host_lock, flags); - qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen); - set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); - qla2xxx_wake_dpc(base_vha); - } else { - int now; - - if (rport) { - ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, - "%s %8phN. rport %p roles %x\n", - __func__, fcport->port_name, rport, - rport->roles); - fc_remote_port_delete(rport); - } - qlt_do_generation_tick(vha, &now); + if (fcport->rport) { + ql_dbg(ql_dbg_disc, fcport->vha, 0x2109, + "%s %8phN. rport %p roles %x\n", + __func__, fcport->port_name, fcport->rport, + fcport->rport->roles); + fc_remote_port_delete(fcport->rport); } + qlt_do_generation_tick(vha, &now); } /* @@ -3856,18 +3881,18 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, * Context: */ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, - int do_login, int defer) + int do_login) { if (IS_QLAFX00(vha->hw)) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); - qla2x00_schedule_rport_del(vha, fcport, defer); + qla2x00_schedule_rport_del(vha, fcport); return; } if (atomic_read(&fcport->state) == FCS_ONLINE && vha->vp_idx == fcport->vha->vp_idx) { qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); - qla2x00_schedule_rport_del(vha, fcport, defer); + qla2x00_schedule_rport_del(vha, fcport); } /* * We may need to retry the login, so don't change the state of the @@ -3896,7 +3921,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport, * Context: */ void -qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) +qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha) { fc_port_t *fcport; @@ -3916,13 +3941,6 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer) */ if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD) continue; - if (atomic_read(&fcport->state) == FCS_ONLINE) { - qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); - if (defer) - qla2x00_schedule_rport_del(vha, fcport, defer); - else if (vha->vp_idx == fcport->vha->vp_idx) - qla2x00_schedule_rport_del(vha, fcport, defer); - } } } @@ -4590,6 +4608,7 @@ qla2x00_free_fw_dump(struct qla_hw_data *ha) ha->fce = NULL; ha->fce_dma = 0; + ha->flags.fce_enabled = 0; ha->eft = NULL; ha->eft_dma = 0; ha->fw_dumped = 0; @@ -4654,7 +4673,8 @@ qla2x00_mem_free(struct qla_hw_data *ha) ha->sfp_data = NULL; if 
(ha->flt) - dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + dma_free_coherent(&ha->pdev->dev, + sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, ha->flt, ha->flt_dma); ha->flt = NULL; ha->flt_dma = 0; @@ -4708,7 +4728,7 @@ qla2x00_mem_free(struct qla_hw_data *ha) mempool_destroy(ha->ctx_mempool); ha->ctx_mempool = NULL; - if (ql2xenabledif) { + if (ql2xenabledif && ha->dif_bundl_pool) { struct dsd_dma *dsd, *nxt; list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head, @@ -4731,8 +4751,7 @@ qla2x00_mem_free(struct qla_hw_data *ha) } } - if (ha->dif_bundl_pool) - dma_pool_destroy(ha->dif_bundl_pool); + dma_pool_destroy(ha->dif_bundl_pool); ha->dif_bundl_pool = NULL; qlt_mem_free(ha); @@ -4804,7 +4823,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, if (!vha->gnl.l) { ql_log(ql_log_fatal, vha, 0xd04a, "Alloc failed for name list.\n"); - scsi_remove_host(vha->host); + scsi_host_put(vha->host); return NULL; } @@ -4816,7 +4835,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, "Alloc failed for scan database.\n"); dma_free_coherent(&ha->pdev->dev, vha->gnl.size, vha->gnl.l, vha->gnl.ldma); - scsi_remove_host(vha->host); + vha->gnl.l = NULL; + scsi_host_put(vha->host); return NULL; } INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn); @@ -4922,7 +4942,6 @@ int qla2x00_post_async_##name##_work( \ qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN); qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT); -qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE); qla2x00_post_async_work(adisc, QLA_EVT_ASYNC_ADISC); qla2x00_post_async_work(prlo, QLA_EVT_ASYNC_PRLO); qla2x00_post_async_work(prlo_done, QLA_EVT_ASYNC_PRLO_DONE); @@ -4989,7 +5008,7 @@ void qla24xx_sched_upd_fcport(fc_port_t *fcport) fcport->jiffies_at_registration = jiffies; fcport->sec_since_registration = 0; fcport->next_disc_state = DSC_DELETED; - fcport->disc_state = DSC_UPD_FCPORT; + qla2x00_set_fcport_disc_state(fcport, DSC_UPD_FCPORT); spin_unlock_irqrestore(&fcport->vha->work_lock, flags); queue_work(system_unbound_wq, &fcport->reg_work); @@ -5030,23 +5049,27 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) fcport->d_id = e->u.new_sess.id; fcport->flags |= FCF_FABRIC_DEVICE; fcport->fw_login_state = DSC_LS_PLOGI_PEND; - if (e->u.new_sess.fc4_type == FS_FC4TYPE_FCP) - fcport->fc4_type = FC4_TYPE_FCP_SCSI; - - if (e->u.new_sess.fc4_type == FS_FC4TYPE_NVME) { - fcport->fc4_type = FC4_TYPE_OTHER; - fcport->fc4f_nvme = FC4_TYPE_NVME; - } memcpy(fcport->port_name, e->u.new_sess.port_name, WWN_SIZE); + + fcport->fc4_type = e->u.new_sess.fc4_type; + if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) { + fcport->fc4_type = FS_FC4TYPE_FCP; + fcport->n2n_flag = 1; + if (vha->flags.nvme_enabled) + fcport->fc4_type |= FS_FC4TYPE_NVME; + } + } else { ql_dbg(ql_dbg_disc, vha, 0xffff, "%s %8phC mem alloc fail.\n", __func__, e->u.new_sess.port_name); - if (pla) + if (pla) { + list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); + } return; } @@ -5077,6 +5100,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) if (fcport) { fcport->id_changed = 1; fcport->scan_state = QLA_FCPORT_FOUND; + fcport->chip_reset = vha->hw->base_qpair->chip_reset; memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE); if (pla) { @@ -5135,13 +5159,12 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) if (dfcp) qlt_schedule_sess_for_deletion(tfcp); - - if (N2N_TOPO(vha->hw)) - 
fcport->flags &= ~FCF_FABRIC_DEVICE; - if (N2N_TOPO(vha->hw)) { + fcport->flags &= ~FCF_FABRIC_DEVICE; + fcport->keep_nport_handle = 1; if (vha->flags.nvme_enabled) { - fcport->fc4f_nvme = 1; + fcport->fc4_type = + (FS_FC4TYPE_NVME | FS_FC4TYPE_FCP); fcport->n2n_flag = 1; } fcport->fw_login_state = 0; @@ -5156,8 +5179,10 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e) if (free_fcport) { qla2x00_free_fcport(fcport); - if (pla) + if (pla) { + list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); + } } } @@ -5204,10 +5229,6 @@ qla2x00_do_work(struct scsi_qla_host *vha) case QLA_EVT_ASYNC_LOGOUT: rc = qla2x00_async_logout(vha, e->u.logio.fcport); break; - case QLA_EVT_ASYNC_LOGOUT_DONE: - qla2x00_async_logout_done(vha, e->u.logio.fcport, - e->u.logio.data); - break; case QLA_EVT_ASYNC_ADISC: qla2x00_async_adisc(vha, e->u.logio.fcport, e->u.logio.data); @@ -5337,9 +5358,8 @@ void qla2x00_relogin(struct scsi_qla_host *vha) } else { if (vha->hw->current_topology != ISP_CFG_NL) { memset(&ea, 0, sizeof(ea)); - ea.event = FCME_RELOGIN; ea.fcport = fcport; - qla2x00_fcport_event_handler(vha, &ea); + qla24xx_handle_relogin_event(vha, &ea); } else if (vha->hw->current_topology == ISP_CFG_NL) { fcport->login_retry--; @@ -5677,7 +5697,6 @@ exit: void qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id) { - uint16_t options = (requester_id << 15) | BIT_6; uint32_t data; uint32_t lock_owner; struct qla_hw_data *ha = base_vha->hw; @@ -5710,22 +5729,6 @@ retry_lock: } return; - - /* XXX: IDC-lock implementation using access-control mbx */ -retry_lock2: - if (qla83xx_access_control(base_vha, options, 0, 0, NULL)) { - ql_dbg(ql_dbg_p3p, base_vha, 0xb072, - "Failed to acquire IDC lock. retrying...\n"); - /* Retry/Perform IDC-Lock recovery */ - if (qla83xx_idc_lock_recovery(base_vha) == QLA_SUCCESS) { - qla83xx_wait_logic(); - goto retry_lock2; - } else - ql_log(ql_log_warn, base_vha, 0xb076, - "IDC Lock recovery FAILED.\n"); - } - - return; } void @@ -6868,13 +6871,13 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) qpair->online = 0; mutex_unlock(&ha->mq_lock); - qla2x00_mark_all_devices_lost(vha, 0); + qla2x00_mark_all_devices_lost(vha); spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); - qla2x00_mark_all_devices_lost(vp, 0); + qla2x00_mark_all_devices_lost(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } @@ -7147,6 +7150,7 @@ struct scsi_host_template qla2xxx_driver_template = { .supported_mode = MODE_INITIATOR, .track_queue_depth = 1, + .cmd_size = sizeof(srb_t), }; static const struct pci_error_handlers qla2xxx_err_handler = { @@ -7238,6 +7242,8 @@ qla2x00_module_init(void) BUILD_BUG_ON(sizeof(struct sns_cmd_pkt) != 2064); BUILD_BUG_ON(sizeof(struct verify_chip_entry_84xx) != 64); BUILD_BUG_ON(sizeof(struct vf_evfp_entry_24xx) != 56); + BUILD_BUG_ON(sizeof(struct qla_flt_region) != 16); + BUILD_BUG_ON(sizeof(struct qla_flt_header) != 8); /* Allocate cache for SRBs. 
*/ srb_cachep = kmem_cache_create("qla2xxx_srbs", sizeof(srb_t), 0, diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c index 1eb82384d933..76a38bf86cbc 100644 --- a/drivers/scsi/qla2xxx/qla_sup.c +++ b/drivers/scsi/qla2xxx/qla_sup.c @@ -473,22 +473,24 @@ qla24xx_read_flash_dword(struct qla_hw_data *ha, uint32_t addr, uint32_t *data) return QLA_FUNCTION_TIMEOUT; } -uint32_t * +int qla24xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, uint32_t dwords) { ulong i; + int ret = QLA_SUCCESS; struct qla_hw_data *ha = vha->hw; /* Dword reads to flash. */ faddr = flash_data_addr(ha, faddr); for (i = 0; i < dwords; i++, faddr++, dwptr++) { - if (qla24xx_read_flash_dword(ha, faddr, dwptr)) + ret = qla24xx_read_flash_dword(ha, faddr, dwptr); + if (ret != QLA_SUCCESS) break; cpu_to_le32s(dwptr); } - return dwptr; + return ret; } static int @@ -667,8 +669,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) struct qla_hw_data *ha = vha->hw; uint32_t def = IS_QLA81XX(ha) ? 2 : IS_QLA25XX(ha) ? 1 : 0; - struct qla_flt_header *flt = (void *)ha->flt; - struct qla_flt_region *region = (void *)&flt[1]; + struct qla_flt_header *flt = ha->flt; + struct qla_flt_region *region = &flt->region[0]; uint16_t *wptr, cnt, chksum; uint32_t start; @@ -680,8 +682,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_flt = flt_addr; wptr = (uint16_t *)ha->flt; - qla24xx_read_flash_data(vha, (void *)flt, flt_addr, - (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE) >> 2); + ha->isp_ops->read_optrom(vha, (void *)flt, flt_addr << 2, + (sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE)); if (le16_to_cpu(*wptr) == 0xffff) goto no_flash_data; @@ -845,15 +847,15 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr) ha->flt_region_img_status_pri = start; break; case FLT_REG_IMG_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_img_status_sec = start; break; case FLT_REG_FW_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_fw_sec = start; break; case FLT_REG_BOOTLOAD_SEC_27XX: - if (IS_QLA27XX(ha) && !IS_QLA28XX(ha)) + if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ha->flt_region_boot_sec = start; break; case FLT_REG_AUX_IMG_PRI_28XX: @@ -948,11 +950,11 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha) struct req_que *req = ha->req_q_map[0]; uint16_t cnt, chksum; uint16_t *wptr = (void *)req->ring; - struct qla_fdt_layout *fdt = (void *)req->ring; + struct qla_fdt_layout *fdt = (struct qla_fdt_layout *)req->ring; uint8_t man_id, flash_id; uint16_t mid = 0, fid = 0; - qla24xx_read_flash_data(vha, (void *)fdt, ha->flt_region_fdt, + ha->isp_ops->read_optrom(vha, fdt, ha->flt_region_fdt << 2, OPTROM_BURST_DWORDS); if (le16_to_cpu(*wptr) == 0xffff) goto no_flash_data; @@ -2650,18 +2652,15 @@ qla28xx_get_flash_region(struct scsi_qla_host *vha, uint32_t start, struct qla_flt_region *region) { struct qla_hw_data *ha = vha->hw; - struct qla_flt_header *flt; - struct qla_flt_region *flt_reg; + struct qla_flt_header *flt = ha->flt; + struct qla_flt_region *flt_reg = &flt->region[0]; uint16_t cnt; int rval = QLA_FUNCTION_FAILED; if (!ha->flt) return QLA_FUNCTION_FAILED; - flt = (struct qla_flt_header *)ha->flt; - flt_reg = (struct qla_flt_region *)&flt[1]; cnt = le16_to_cpu(flt->length) / sizeof(struct qla_flt_region); - for (; cnt; cnt--, flt_reg++) { if (flt_reg->start == start) { memcpy((uint8_t *)region, flt_reg, @@ -2723,8 +2722,11 @@ 
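
The qla_sup.c hunks above rely on struct qla_flt_header now ending in a region[] member, so the old (struct qla_flt_region *)&flt[1] cast becomes flt->region[0], and the new BUILD_BUG_ON checks pin the on-disk sizes. A standalone sketch of the flexible-array-member idiom; the field layout here is illustrative, not the real FLT format.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct flt_region {
            uint16_t code;
            uint16_t attributes;
            uint32_t size;
            uint32_t start;
            uint32_t end;
    };

    struct flt_header {
            uint16_t version;
            uint16_t length;        /* bytes of region[] that follow */
            uint16_t checksum;
            uint16_t unused;
            struct flt_region region[];     /* flexible array member */
    };

    int main(void)
    {
            size_t nregions = 4;
            struct flt_header *flt = calloc(1, sizeof(*flt) +
                                            nregions * sizeof(struct flt_region));

            if (!flt)
                    return 1;

            flt->length = nregions * sizeof(struct flt_region);
            /* regions indexed directly instead of casting past the header */
            flt->region[0].code = 0x01;
            printf("regions: %u\n",
                   flt->length / (unsigned)sizeof(struct flt_region));
            free(flt);
            return 0;
    }
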
qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, "Region %x is secure\n", region.code); - if (region.code == FLT_REG_FW || - region.code == FLT_REG_FW_SEC_27XX) { + switch (region.code) { + case FLT_REG_FW: + case FLT_REG_FW_SEC_27XX: + case FLT_REG_MPI_PRI_28XX: + case FLT_REG_MPI_SEC_28XX: fw_array = dwptr; /* 1st fw array */ @@ -2755,9 +2757,23 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, buf_size_without_sfub += risc_size; fw_array += risc_size; } - } else { - ql_log(ql_log_warn + ql_dbg_verbose, vha, 0xffff, - "Secure region %x not supported\n", + break; + + case FLT_REG_PEP_PRI_28XX: + case FLT_REG_PEP_SEC_28XX: + fw_array = dwptr; + + /* 1st fw array */ + risc_size = be32_to_cpu(fw_array[3]); + risc_attr = be32_to_cpu(fw_array[9]); + + buf_size_without_sfub = risc_size; + fw_array += risc_size; + break; + + default: + ql_log(ql_log_warn + ql_dbg_verbose, vha, + 0xffff, "Secure region %x not supported\n", region.code); rval = QLA_COMMAND_ERROR; goto done; @@ -2878,7 +2894,7 @@ qla28xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr, "Sending Secure Flash MB Cmd\n"); rval = qla28xx_secure_flash_update(vha, 0, region.code, buf_size_without_sfub, sfub_dma, - sizeof(struct secure_flash_update_block)); + sizeof(struct secure_flash_update_block) >> 2); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0xffff, "Secure Flash MB Cmd failed %x.", rval); diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 1c1f63be6eed..70081b395fb2 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -188,18 +188,17 @@ static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked) static inline struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha, - uint8_t *d_id) + be_id_t d_id) { struct scsi_qla_host *host; - uint32_t key = 0; + uint32_t key; - if ((vha->d_id.b.area == d_id[1]) && (vha->d_id.b.domain == d_id[0]) && - (vha->d_id.b.al_pa == d_id[2])) + if (vha->d_id.b.area == d_id.area && + vha->d_id.b.domain == d_id.domain && + vha->d_id.b.al_pa == d_id.al_pa) return vha; - key = (uint32_t)d_id[0] << 16; - key |= (uint32_t)d_id[1] << 8; - key |= (uint32_t)d_id[2]; + key = be_to_port_id(d_id).b24; host = btree_lookup32(&vha->hw->tgt.host_map, key); if (!host) @@ -357,9 +356,9 @@ static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt, vha, 0xe03e, "qla_target(%d): Received ATIO_TYPE7 " "with unknown d_id %x:%x:%x\n", vha->vp_idx, - atio->u.isp24.fcp_hdr.d_id[0], - atio->u.isp24.fcp_hdr.d_id[1], - atio->u.isp24.fcp_hdr.d_id[2]); + atio->u.isp24.fcp_hdr.d_id.domain, + atio->u.isp24.fcp_hdr.d_id.area, + atio->u.isp24.fcp_hdr.d_id.al_pa); qlt_queue_unknown_atio(vha, atio, ha_locked); @@ -464,7 +463,7 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, case IMMED_NOTIFY_TYPE: { - struct scsi_qla_host *host = vha; + struct scsi_qla_host *host; struct imm_ntfy_from_isp *entry = (struct imm_ntfy_from_isp *)pkt; @@ -560,10 +559,8 @@ static int qla24xx_post_nack_work(struct scsi_qla_host *vha, fc_port_t *fcport, return qla2x00_post_work(vha, e); } -static -void qla2x00_async_nack_sp_done(void *s, int res) +static void qla2x00_async_nack_sp_done(srb_t *sp, int res) { - struct srb *sp = (struct srb *)s; struct scsi_qla_host *vha = sp->vha; unsigned long flags; @@ -599,7 +596,8 @@ void qla2x00_async_nack_sp_done(void *s, int res) 
spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags); } else { sp->fcport->login_retry = 0; - sp->fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(sp->fcport, + DSC_LOGIN_COMPLETE); sp->fcport->deleted = 0; sp->fcport->logout_on_delete = 1; } @@ -789,6 +787,8 @@ qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id, { struct qlt_plogi_ack_t *pla; + lockdep_assert_held(&vha->hw->hardware_lock); + list_for_each_entry(pla, &vha->plogi_ack_list, list) { if (pla->id.b24 == id->b24) { ql_dbg(ql_dbg_disc + ql_dbg_verbose, vha, 0x210d, @@ -954,11 +954,11 @@ void qlt_free_session_done(struct work_struct *work) struct qla_hw_data *ha = vha->hw; unsigned long flags; bool logout_started = false; - scsi_qla_host_t *base_vha; + scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); struct qlt_plogi_ack_t *own = sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN]; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084, + ql_dbg(ql_dbg_disc, vha, 0xf084, "%s: se_sess %p / sess %p from port %8phC loop_id %#04x" " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n", __func__, sess->se_sess, sess, sess->port_name, sess->loop_id, @@ -967,7 +967,7 @@ void qlt_free_session_done(struct work_struct *work) sess->send_els_logo); if (!IS_SW_RESV_ADDR(sess->d_id)) { - qla2x00_mark_device_lost(vha, sess, 0, 0); + qla2x00_mark_device_lost(vha, sess, 0); if (sess->send_els_logo) { qlt_port_logo_t logo; @@ -1021,15 +1021,19 @@ void qlt_free_session_done(struct work_struct *work) if (logout_started) { bool traced = false; + u16 cnt = 0; while (!READ_ONCE(sess->logout_completed)) { if (!traced) { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, + ql_dbg(ql_dbg_disc, vha, 0xf086, "%s: waiting for sess %p logout\n", __func__, sess); traced = true; } msleep(100); + cnt++; + if (cnt > 200) + break; } ql_dbg(ql_dbg_disc, vha, 0xf087, @@ -1042,6 +1046,10 @@ void qlt_free_session_done(struct work_struct *work) (struct imm_ntfy_from_isp *)sess->iocb, SRB_NACK_LOGO); } + spin_lock_irqsave(&vha->work_lock, flags); + sess->flags &= ~FCF_ASYNC_SENT; + spin_unlock_irqrestore(&vha->work_lock, flags); + spin_lock_irqsave(&ha->tgt.sess_lock, flags); if (sess->se_sess) { sess->se_sess = NULL; @@ -1049,7 +1057,7 @@ void qlt_free_session_done(struct work_struct *work) tgt->sess_count--; } - sess->disc_state = DSC_DELETED; + qla2x00_set_fcport_disc_state(sess, DSC_DELETED); sess->fw_login_state = DSC_LS_PORT_UNAVAIL; sess->deleted = QLA_SESS_DELETED; @@ -1101,26 +1109,20 @@ void qlt_free_session_done(struct work_struct *work) } } + sess->explicit_logout = 0; spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + sess->free_pending = 0; - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, + ql_dbg(ql_dbg_disc, vha, 0xf001, "Unregistration of sess %p %8phC finished fcp_cnt %d\n", sess, sess->port_name, vha->fcport_count); if (tgt && (tgt->sess_count == 0)) wake_up_all(&tgt->waitQ); - if (vha->fcport_count == 0) - wake_up_all(&vha->fcport_waitQ); - - base_vha = pci_get_drvdata(ha->pdev); - - sess->free_pending = 0; - - if (test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags)) - return; - - if ((!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { + if (!test_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags) && + !(vha->vp_idx && test_bit(VPORT_DELETE, &vha->dpc_flags)) && + (!tgt || !tgt->tgt_stop) && !LOOP_TRANSITION(vha)) { switch (vha->host->active_mode) { case MODE_INITIATOR: case MODE_DUAL: @@ -1133,6 +1135,9 @@ void qlt_free_session_done(struct work_struct *work) break; } } + + if (vha->fcport_count == 0) + wake_up_all(&vha->fcport_waitQ); } /* ha->tgt.sess_lock 
supposed to be held on entry */ @@ -1151,18 +1156,22 @@ void qlt_unreg_sess(struct fc_port *sess) return; } sess->free_pending = 1; + /* + * Use FCF_ASYNC_SENT flag to block other cmds used in sess + * management from being sent. + */ + sess->flags |= FCF_ASYNC_SENT; spin_unlock_irqrestore(&sess->vha->work_lock, flags); if (sess->se_sess) vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; - sess->disc_state = DSC_DELETE_PEND; + qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); sess->last_rscn_gen = sess->rscn_gen; sess->last_login_gen = sess->login_gen; - INIT_WORK(&sess->free_work, qlt_free_session_done); - schedule_work(&sess->free_work); + queue_work(sess->vha->hw->wq, &sess->free_work); } EXPORT_SYMBOL(qlt_unreg_sess); @@ -1209,7 +1218,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess) sess->logout_on_delete = 0; sess->logo_ack_needed = 0; sess->fw_login_state = DSC_LS_PORT_UNAVAIL; - sess->scan_state = 0; } } @@ -1259,7 +1267,8 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) sess->deleted = QLA_SESS_DELETION_IN_PROGRESS; spin_unlock_irqrestore(&sess->vha->work_lock, flags); - sess->disc_state = DSC_DELETE_PEND; + sess->prli_pend_timer = 0; + qla2x00_set_fcport_disc_state(sess, DSC_DELETE_PEND); qla24xx_chk_fcp_state(sess); @@ -1267,7 +1276,6 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess) "Scheduling sess %p for deletion %8phC\n", sess, sess->port_name); - INIT_WORK(&sess->del_work, qla24xx_delete_sess_fn); WARN_ON(!queue_work(sess->vha->hw->wq, &sess->del_work)); } @@ -1284,13 +1292,12 @@ static void qlt_clear_tgt_db(struct qla_tgt *tgt) /* At this point tgt could be already dead */ } -static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, +static int qla24xx_get_loop_id(struct scsi_qla_host *vha, be_id_t s_id, uint16_t *loop_id) { struct qla_hw_data *ha = vha->hw; dma_addr_t gid_list_dma; - struct gid_list_info *gid_list; - char *id_iter; + struct gid_list_info *gid_list, *gid; int res, rc, i; uint16_t entries; @@ -1313,19 +1320,17 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id, goto out_free_id_list; } - id_iter = (char *)gid_list; + gid = gid_list; res = -ENOENT; for (i = 0; i < entries; i++) { - struct gid_list_info *gid = (struct gid_list_info *)id_iter; - - if ((gid->al_pa == s_id[2]) && - (gid->area == s_id[1]) && - (gid->domain == s_id[0])) { + if (gid->al_pa == s_id.al_pa && + gid->area == s_id.area && + gid->domain == s_id.domain) { *loop_id = le16_to_cpu(gid->loop_id); res = 0; break; } - id_iter += ha->gid_list_info_size; + gid = (void *)gid + ha->gid_list_info_size; } out_free_id_list: @@ -1582,11 +1587,10 @@ static void qlt_release(struct qla_tgt *tgt) struct qla_qpair_hint *h; struct qla_hw_data *ha = vha->hw; - if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stop && - !tgt->tgt_stopped) + if (!tgt->tgt_stop && !tgt->tgt_stopped) qlt_stop_phase1(tgt); - if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped) + if (!tgt->tgt_stopped) qlt_stop_phase2(tgt); for (i = 0; i < vha->hw->max_qpairs + 1; i++) { @@ -1772,12 +1776,8 @@ static int qlt_build_abts_resp_iocb(struct qla_tgt_mgmt_cmd *mcmd) resp->fcp_hdr_le.f_ctl[1] = *p++; resp->fcp_hdr_le.f_ctl[2] = *p; - resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; - resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; - resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; - resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; - resp->fcp_hdr_le.s_id[1] = 
abts->fcp_hdr_le.d_id[1]; - resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; + resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id; + resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id; resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; if (mcmd->fc_tm_rsp == FCP_TMF_CMPL) { @@ -1848,19 +1848,11 @@ static void qlt_24xx_send_abts_resp(struct qla_qpair *qpair, resp->fcp_hdr_le.f_ctl[1] = *p++; resp->fcp_hdr_le.f_ctl[2] = *p; if (ids_reversed) { - resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0]; - resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1]; - resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2]; - resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0]; - resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1]; - resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2]; + resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.d_id; + resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.s_id; } else { - resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0]; - resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1]; - resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2]; - resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0]; - resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1]; - resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2]; + resp->fcp_hdr_le.d_id = abts->fcp_hdr_le.s_id; + resp->fcp_hdr_le.s_id = abts->fcp_hdr_le.d_id; } resp->exchange_addr_to_abort = abts->exchange_addr_to_abort; if (status == FCP_TMF_CMPL) { @@ -1927,18 +1919,14 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, tmp = (CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE); if (mcmd) { - ctio->initiator_id[0] = entry->fcp_hdr_le.s_id[0]; - ctio->initiator_id[1] = entry->fcp_hdr_le.s_id[1]; - ctio->initiator_id[2] = entry->fcp_hdr_le.s_id[2]; + ctio->initiator_id = entry->fcp_hdr_le.s_id; if (mcmd->flags & QLA24XX_MGMT_ABORT_IO_ATTR_VALID) tmp |= (mcmd->abort_io_attr << 9); else if (qpair->retry_term_cnt & 1) tmp |= (0x4 << 9); } else { - ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0]; - ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1]; - ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2]; + ctio->initiator_id = entry->fcp_hdr_le.d_id; if (qpair->retry_term_cnt & 1) tmp |= (0x4 << 9); @@ -1972,8 +1960,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha, * XXX does not go through the list of other port (which may have cmds * for the same lun) */ -static void abort_cmds_for_lun(struct scsi_qla_host *vha, - u64 lun, uint8_t *s_id) +static void abort_cmds_for_lun(struct scsi_qla_host *vha, u64 lun, be_id_t s_id) { struct qla_tgt_sess_op *op; struct qla_tgt_cmd *cmd; @@ -2149,7 +2136,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct qla_hw_data *ha = vha->hw; struct fc_port *sess; uint32_t tag = abts->exchange_addr_to_abort; - uint8_t s_id[3]; + be_id_t s_id; int rc; unsigned long flags; @@ -2173,13 +2160,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha, ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011, "qla_target(%d): task abort (s_id=%x:%x:%x, " - "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2], - abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag, + "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id.domain, + abts->fcp_hdr_le.s_id.area, abts->fcp_hdr_le.s_id.al_pa, tag, le32_to_cpu(abts->fcp_hdr_le.parameter)); - s_id[0] = abts->fcp_hdr_le.s_id[2]; - s_id[1] = abts->fcp_hdr_le.s_id[1]; - s_id[2] = abts->fcp_hdr_le.s_id[0]; + s_id = le_id_to_be(abts->fcp_hdr_le.s_id); spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, 
s_id); @@ -2243,9 +2228,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct qla_qpair *qpair, ctio->nport_handle = mcmd->sess->loop_id; ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio->vp_index = ha->vp_idx; - ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; - ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; - ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); ctio->exchange_addr = atio->u.isp24.exchange_addr; temp = (atio->u.isp24.attr << 9)| CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; @@ -2302,9 +2285,7 @@ void qlt_send_resp_ctio(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, ctio->nport_handle = cmd->sess->loop_id; ctio->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio->vp_index = vha->vp_idx; - ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; - ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; - ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); ctio->exchange_addr = atio->u.isp24.exchange_addr; temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS; @@ -2605,9 +2586,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_qpair *qpair, pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); - pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; - pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; - pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); pkt->exchange_addr = atio->u.isp24.exchange_addr; temp = atio->u.isp24.attr << 9; pkt->u.status0.flags |= cpu_to_le16(temp); @@ -3120,9 +3099,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) pkt->handle |= CTIO_COMPLETION_HANDLE_MARK; pkt->nport_handle = cpu_to_le16(prm->cmd->loop_id); pkt->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); - pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; - pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; - pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + pkt->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); pkt->exchange_addr = atio->u.isp24.exchange_addr; /* silence compile warning */ @@ -3164,7 +3141,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) pkt->crc_context_len = CRC_CONTEXT_LEN_FW; if (!bundling) { - cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd; + cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0]; } else { /* * Configure Bundling if we need to fetch interlaving @@ -3174,7 +3151,7 @@ qlt_build_ctio_crc2_pkt(struct qla_qpair *qpair, struct qla_tgt_prm *prm) crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes); crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(prm->tot_dsds - prm->prot_seg_cnt); - cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd; + cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0]; } /* Finish the common fields of CRC pkt */ @@ -3239,7 +3216,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, if (!qpair->fw_started || (cmd->reset_count != qpair->chip_reset) || (cmd->sess && cmd->sess->deleted)) { cmd->state = QLA_TGT_STATE_PROCESSED; - return 0; + res = 0; + goto free; } ql_dbg_qp(ql_dbg_tgt, qpair, 0xe018, @@ -3250,9 +3228,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, &full_req_cnt); - if (unlikely(res != 0)) { - return res; - } + if (unlikely(res != 0)) + goto 
free; spin_lock_irqsave(qpair->qp_lock_ptr, flags); @@ -3272,7 +3249,8 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type, vha->flags.online, qla2x00_reset_active(vha), cmd->reset_count, qpair->chip_reset); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); - return 0; + res = 0; + goto free; } /* Does F/W have an IOCBs for this request */ @@ -3375,6 +3353,8 @@ out_unmap_unlock: qlt_unmap_sg(vha, cmd); spin_unlock_irqrestore(qpair->qp_lock_ptr, flags); +free: + vha->hw->tgt.tgt_ops->free_cmd(cmd); return res; } EXPORT_SYMBOL(qlt_xmit_response); @@ -3477,13 +3457,13 @@ qlt_handle_dif_error(struct qla_qpair *qpair, struct qla_tgt_cmd *cmd, cmd->trc_flags |= TRC_DIF_ERR; - cmd->a_guard = be16_to_cpu(*(uint16_t *)(ap + 0)); - cmd->a_app_tag = be16_to_cpu(*(uint16_t *)(ap + 2)); - cmd->a_ref_tag = be32_to_cpu(*(uint32_t *)(ap + 4)); + cmd->a_guard = get_unaligned_be16(ap + 0); + cmd->a_app_tag = get_unaligned_be16(ap + 2); + cmd->a_ref_tag = get_unaligned_be32(ap + 4); - cmd->e_guard = be16_to_cpu(*(uint16_t *)(ep + 0)); - cmd->e_app_tag = be16_to_cpu(*(uint16_t *)(ep + 2)); - cmd->e_ref_tag = be32_to_cpu(*(uint32_t *)(ep + 4)); + cmd->e_guard = get_unaligned_be16(ep + 0); + cmd->e_app_tag = get_unaligned_be16(ep + 2); + cmd->e_ref_tag = get_unaligned_be32(ep + 4); ql_dbg(ql_dbg_tgt_dif, vha, 0xf075, "%s: aborted %d state %d\n", __func__, cmd->aborted, cmd->state); @@ -3672,9 +3652,7 @@ static int __qlt_send_term_exchange(struct qla_qpair *qpair, ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED; ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; - ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; - ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; - ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); ctio24->exchange_addr = atio->u.isp24.exchange_addr; temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_TERMINATE; @@ -4107,8 +4085,6 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha, return fcp_task_attr; } -static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *, - uint8_t *); /* * Process context for I/O path into tcm_qla2xxx code */ @@ -4352,9 +4328,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha, return -ENODEV; } - id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; - id.b.area = atio->u.isp24.fcp_hdr.s_id[1]; - id.b.domain = atio->u.isp24.fcp_hdr.s_id[0]; + id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); if (IS_SW_RESV_ADDR(id)) return -EBUSY; @@ -4616,7 +4590,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, /* find other sess with nport_id collision */ if (port_id.b24 == other_sess->d_id.b24) { if (loop_id != other_sess->loop_id) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000c, + ql_dbg(ql_dbg_disc, vha, 0x1000c, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4632,7 +4606,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, * Another wwn used to have our s_id/loop_id * kill the session, but don't free the loop_id */ - ql_dbg(ql_dbg_tgt_tmr, vha, 0xf01b, + ql_dbg(ql_dbg_disc, vha, 0xf01b, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4647,7 +4621,7 @@ qlt_find_sess_invalidate_other(scsi_qla_host_t *vha, uint64_t wwn, /* find other sess with nport handle collision */ if ((loop_id == other_sess->loop_id) && (loop_id != FC_NO_LOOP_ID)) { - ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000d, 
+ ql_dbg(ql_dbg_disc, vha, 0x1000d, "Invalidating sess %p loop_id %d wwn %llx.\n", other_sess, other_sess->loop_id, other_wwn); @@ -4716,6 +4690,8 @@ static int qlt_handle_login(struct scsi_qla_host *vha, struct qlt_plogi_ack_t *pla; unsigned long flags; + lockdep_assert_held(&vha->hw->hardware_lock); + wwn = wwn_to_u64(iocb->u.isp24.port_name); port_id.b.domain = iocb->u.isp24.port_id[2]; @@ -4799,8 +4775,10 @@ static int qlt_handle_login(struct scsi_qla_host *vha, __func__, sess->port_name, sec); } - if (!conflict_sess) + if (!conflict_sess) { + list_del(&pla->list); kmem_cache_free(qla_tgt_plogi_cachep, pla); + } qlt_send_term_imm_notif(vha, iocb, 1); goto out; @@ -4836,6 +4814,7 @@ static int qlt_handle_login(struct scsi_qla_host *vha, switch (sess->disc_state) { case DSC_DELETED: + case DSC_LOGIN_PEND: qlt_plogi_ack_unref(vha, pla); break; @@ -4889,6 +4868,8 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha, int res = 0; unsigned long flags; + lockdep_assert_held(&ha->hardware_lock); + wwn = wwn_to_u64(iocb->u.isp24.port_name); port_id.b.domain = iocb->u.isp24.port_id[2]; @@ -5165,6 +5146,8 @@ static void qlt_handle_imm_notify(struct scsi_qla_host *vha, int send_notify_ack = 1; uint16_t status; + lockdep_assert_held(&ha->hardware_lock); + status = le16_to_cpu(iocb->u.isp2x.status); switch (status) { case IMM_NTFY_LIP_RESET: @@ -5302,10 +5285,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair, u16 temp; port_id_t id; - id.b.al_pa = atio->u.isp24.fcp_hdr.s_id[2]; - id.b.area = atio->u.isp24.fcp_hdr.s_id[1]; - id.b.domain = atio->u.isp24.fcp_hdr.s_id[0]; - id.b.rsvd_1 = 0; + id = be_to_port_id(atio->u.isp24.fcp_hdr.s_id); spin_lock_irqsave(&ha->tgt.sess_lock, flags); sess = qla2x00_find_fcport_by_nportid(vha, &id, 1); @@ -5333,9 +5313,7 @@ static int __qlt_send_busy(struct qla_qpair *qpair, ctio24->nport_handle = sess->loop_id; ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT); ctio24->vp_index = vha->vp_idx; - ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2]; - ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1]; - ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; + ctio24->initiator_id = be_id_to_le(atio->u.isp24.fcp_hdr.s_id); ctio24->exchange_addr = atio->u.isp24.exchange_addr; temp = (atio->u.isp24.attr << 9) | CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS | @@ -5767,7 +5745,7 @@ static void qlt_handle_abts_completion(struct scsi_qla_host *vha, entry->error_subcode2); ha->tgt.tgt_ops->free_mcmd(mcmd); } - } else { + } else if (mcmd) { ha->tgt.tgt_ops->free_mcmd(mcmd); } } @@ -6086,7 +6064,7 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, if (!IS_SW_RESV_ADDR(fcport->d_id)) vha->fcport_count++; fcport->login_gen++; - fcport->disc_state = DSC_LOGIN_COMPLETE; + qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE); fcport->login_succ = 1; newfcport = 1; } @@ -6121,21 +6099,21 @@ static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha, /* Must be called under tgt_mutex */ static struct fc_port *qlt_make_local_sess(struct scsi_qla_host *vha, - uint8_t *s_id) + be_id_t s_id) { struct fc_port *sess = NULL; fc_port_t *fcport = NULL; int rc, global_resets; uint16_t loop_id = 0; - if ((s_id[0] == 0xFF) && (s_id[1] == 0xFC)) { + if (s_id.domain == 0xFF && s_id.area == 0xFC) { /* * This is Domain Controller, so it should be * OK to drop SCSI commands from it. 
*/ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042, "Unable to find initiator with S_ID %x:%x:%x", - s_id[0], s_id[1], s_id[2]); + s_id.domain, s_id.area, s_id.al_pa); return NULL; } @@ -6152,13 +6130,12 @@ retry: ql_log(ql_log_info, vha, 0xf071, "qla_target(%d): Unable to find " "initiator with S_ID %x:%x:%x", - vha->vp_idx, s_id[0], s_id[1], - s_id[2]); + vha->vp_idx, s_id.domain, s_id.area, s_id.al_pa); if (rc == -ENOENT) { qlt_port_logo_t logo; - sid_to_portid(s_id, &logo.id); + logo.id = be_to_port_id(s_id); logo.cmd_count = 1; qlt_send_first_logo(vha, &logo); } @@ -6197,8 +6174,7 @@ static void qlt_abort_work(struct qla_tgt *tgt, struct qla_hw_data *ha = vha->hw; struct fc_port *sess = NULL; unsigned long flags = 0, flags2 = 0; - uint32_t be_s_id; - uint8_t s_id[3]; + be_id_t s_id; int rc; spin_lock_irqsave(&ha->tgt.sess_lock, flags2); @@ -6206,12 +6182,9 @@ static void qlt_abort_work(struct qla_tgt *tgt, if (tgt->tgt_stop) goto out_term2; - s_id[0] = prm->abts.fcp_hdr_le.s_id[2]; - s_id[1] = prm->abts.fcp_hdr_le.s_id[1]; - s_id[2] = prm->abts.fcp_hdr_le.s_id[0]; + s_id = le_id_to_be(prm->abts.fcp_hdr_le.s_id); - sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, - (unsigned char *)&be_s_id); + sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id); if (!sess) { spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); @@ -6248,9 +6221,6 @@ static void qlt_abort_work(struct qla_tgt *tgt, out_term2: spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2); - if (sess) - ha->tgt.tgt_ops->put_sess(sess); - out_term: spin_lock_irqsave(&ha->hardware_lock, flags); qlt_24xx_send_abts_resp(ha->base_qpair, &prm->abts, @@ -6266,7 +6236,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt, struct qla_hw_data *ha = vha->hw; struct fc_port *sess; unsigned long flags; - uint8_t *s_id = NULL; /* to hide compiler warnings */ + be_id_t s_id; int rc; u64 unpacked_lun; int fn; @@ -6495,22 +6465,10 @@ void qlt_remove_target_resources(struct qla_hw_data *ha) static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn, unsigned char *b) { - int i; - - pr_debug("qla2xxx HW vha->node_name: "); - for (i = 0; i < WWN_SIZE; i++) - pr_debug("%02x ", vha->node_name[i]); - pr_debug("\n"); - pr_debug("qla2xxx HW vha->port_name: "); - for (i = 0; i < WWN_SIZE; i++) - pr_debug("%02x ", vha->port_name[i]); - pr_debug("\n"); - - pr_debug("qla2xxx passed configfs WWPN: "); + pr_debug("qla2xxx HW vha->node_name: %8phC\n", vha->node_name); + pr_debug("qla2xxx HW vha->port_name: %8phC\n", vha->port_name); put_unaligned_be64(wwpn, b); - for (i = 0; i < WWN_SIZE; i++) - pr_debug("%02x ", b[i]); - pr_debug("\n"); + pr_debug("qla2xxx passed configfs WWPN: %8phC\n", b); } /** @@ -6671,6 +6629,8 @@ qlt_enable_vha(struct scsi_qla_host *vha) if (vha->qlini_mode == QLA2XXX_INI_MODE_ENABLED) return; + if (ha->tgt.num_act_qpairs > ha->max_qpairs) + ha->tgt.num_act_qpairs = ha->max_qpairs; spin_lock_irqsave(&ha->hardware_lock, flags); tgt->tgt_stopped = 0; qlt_set_mode(vha); @@ -6685,7 +6645,8 @@ qlt_enable_vha(struct scsi_qla_host *vha) } else { set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags); qla2xxx_wake_dpc(base_vha); - qla2x00_wait_for_hba_online(base_vha); + WARN_ON_ONCE(qla2x00_wait_for_hba_online(base_vha) != + QLA_SUCCESS); } mutex_unlock(&ha->optrom_mutex); } @@ -6716,7 +6677,9 @@ static void qlt_disable_vha(struct scsi_qla_host *vha) set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); qla2xxx_wake_dpc(vha); - qla2x00_wait_for_hba_online(vha); + if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) + ql_dbg(ql_dbg_tgt, vha, 0xe081, + 
"qla2x00_wait_for_hba_online() failed\n"); } /* @@ -6815,7 +6778,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked) */ ql_log(ql_log_warn, vha, 0xd03c, "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n", - pkt->u.isp24.fcp_hdr.s_id, + &pkt->u.isp24.fcp_hdr.s_id, be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id), le32_to_cpu(pkt->u.isp24.exchange_addr), pkt); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index b8d244f1e189..6539499e9e95 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -247,9 +247,9 @@ struct ctio_to_2xxx { struct fcp_hdr { uint8_t r_ctl; - uint8_t d_id[3]; + be_id_t d_id; uint8_t cs_ctl; - uint8_t s_id[3]; + be_id_t s_id; uint8_t type; uint8_t f_ctl[3]; uint8_t seq_id; @@ -261,9 +261,9 @@ struct fcp_hdr { } __packed; struct fcp_hdr_le { - uint8_t d_id[3]; + le_id_t d_id; uint8_t r_ctl; - uint8_t s_id[3]; + le_id_t s_id; uint8_t cs_ctl; uint8_t f_ctl[3]; uint8_t type; @@ -379,8 +379,7 @@ static inline int get_datalen_for_atio(struct atio_from_isp *atio) { int len = atio->u.isp24.fcp_cmnd.add_cdb_len; - return (be32_to_cpu(get_unaligned((uint32_t *) - &atio->u.isp24.fcp_cmnd.add_cdb[len * 4]))); + return get_unaligned_be32(&atio->u.isp24.fcp_cmnd.add_cdb[len * 4]); } #define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */ @@ -402,7 +401,7 @@ struct ctio7_to_24xx { uint16_t dseg_count; /* Data segment count. */ uint8_t vp_index; uint8_t add_flags; - uint8_t initiator_id[3]; + le_id_t initiator_id; uint8_t reserved; uint32_t exchange_addr; union { @@ -498,7 +497,7 @@ struct ctio_crc2_to_fw { uint8_t add_flags; /* additional flags */ #define CTIO_CRC2_AF_DIF_DSD_ENA BIT_3 - uint8_t initiator_id[3]; /* initiator ID */ + le_id_t initiator_id; /* initiator ID */ uint8_t reserved1; uint32_t exchange_addr; /* rcv exchange address */ uint16_t reserved2; @@ -682,7 +681,7 @@ struct qla_tgt_func_tmpl { struct fc_port *(*find_sess_by_loop_id)(struct scsi_qla_host *, const uint16_t); struct fc_port *(*find_sess_by_s_id)(struct scsi_qla_host *, - const uint8_t *); + const be_id_t); void (*clear_nacl_from_fcport_map)(struct fc_port *); void (*put_sess)(struct fc_port *); void (*shutdown_sess)(struct fc_port *); @@ -912,7 +911,7 @@ struct qla_tgt_cmd { uint8_t scsi_status, sense_key, asc, ascq; struct crc_context *ctx; - uint8_t *cdb; + const uint8_t *cdb; uint64_t lba; uint16_t a_guard, e_guard, a_app_tag, e_app_tag; uint32_t a_ref_tag, e_ref_tag; @@ -1030,22 +1029,11 @@ static inline bool qla_dual_mode_enabled(struct scsi_qla_host *ha) return (ha->host->active_mode == MODE_DUAL); } -static inline uint32_t sid_to_key(const uint8_t *s_id) +static inline uint32_t sid_to_key(const be_id_t s_id) { - uint32_t key; - - key = (((unsigned long)s_id[0] << 16) | - ((unsigned long)s_id[1] << 8) | - (unsigned long)s_id[2]); - return key; -} - -static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p) -{ - memset(p, 0, sizeof(*p)); - p->b.domain = s_id[0]; - p->b.area = s_id[1]; - p->b.al_pa = s_id[2]; + return s_id.domain << 16 | + s_id.area << 8 | + s_id.al_pa; } /* diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c index de696a07532e..5b0c057def2b 100644 --- a/drivers/scsi/qla2xxx/qla_tmpl.c +++ b/drivers/scsi/qla2xxx/qla_tmpl.c @@ -10,6 +10,7 @@ #define ISPREG(vha) (&(vha)->hw->iobase->isp24) #define IOBAR(reg) offsetof(typeof(*(reg)), iobase_addr) #define IOBASE(vha) IOBAR(ISPREG(vha)) +#define INVALID_ENTRY ((struct qla27xx_fwdt_entry 
*)0xffffffffffffffffUL) static inline void qla27xx_insert16(uint16_t value, void *buf, ulong *len) @@ -261,6 +262,7 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, ulong start = le32_to_cpu(ent->t262.start_addr); ulong end = le32_to_cpu(ent->t262.end_addr); ulong dwords; + int rc; ql_dbg(ql_dbg_misc, vha, 0xd206, "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len); @@ -308,7 +310,13 @@ qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha, dwords = end - start + 1; if (buf) { buf += *len; - qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); + rc = qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf); + if (rc != QLA_SUCCESS) { + ql_dbg(ql_dbg_async, vha, 0xffff, + "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n", + __func__, area, start, end); + return INVALID_ENTRY; + } } *len += dwords * sizeof(uint32_t); done: @@ -429,7 +437,7 @@ qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha, ql_dbg(ql_dbg_misc, vha, 0xd20a, "%s: reset risc [%lx]\n", __func__, *len); if (buf) - qla24xx_soft_reset(vha->hw); + WARN_ON_ONCE(qla24xx_soft_reset(vha->hw) != QLA_SUCCESS); return qla27xx_next_entry(ent); } @@ -838,6 +846,13 @@ qla27xx_walk_template(struct scsi_qla_host *vha, ent = qla27xx_find_entry(type)(vha, ent, buf, len); if (!ent) break; + + if (ent == INVALID_ENTRY) { + *len = 0; + ql_dbg(ql_dbg_async, vha, 0xffff, + "Unable to capture FW dump"); + goto bailout; + } } if (tmp->count) @@ -847,6 +862,9 @@ qla27xx_walk_template(struct scsi_qla_host *vha, if (ent) ql_dbg(ql_dbg_misc, vha, 0xd019, "%s: missing end entry\n", __func__); + +bailout: + cpu_to_le32s(&tmp->count); /* endianize residual count */ } static void @@ -860,8 +878,9 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp) { uint8_t v[] = { 0, 0, 0, 0, 0, 0 }; - sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu", - v+0, v+1, v+2, v+3, v+4, v+5); + WARN_ON_ONCE(sscanf(qla2x00_version_str, + "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu", + v+0, v+1, v+2, v+3, v+4, v+5) != 6); tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]; tmp->driver_info[1] = v[5] << 8 | v[4]; @@ -998,8 +1017,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) uint j; ulong len; void *buf = vha->hw->fw_dump; + uint count = vha->hw->fw_dump_mpi ? 
2 : 1; - for (j = 0; j < 2; j++, fwdt++, buf += len) { + for (j = 0; j < count; j++, fwdt++, buf += len) { ql_log(ql_log_warn, vha, 0xd011, "-> fwdt%u running...\n", j); if (!fwdt->template) { @@ -1009,7 +1029,9 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) } len = qla27xx_execute_fwdt_template(vha, fwdt->template, buf); - if (len != fwdt->dump_size) { + if (len == 0) { + goto bailout; + } else if (len != fwdt->dump_size) { ql_log(ql_log_warn, vha, 0xd013, "-> fwdt%u fwdump residual=%+ld\n", j, fwdt->dump_size - len); @@ -1024,6 +1046,8 @@ qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked) qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); } +bailout: + vha->hw->fw_dump_mpi = 0; #ifndef __CHECKER__ if (!hardware_locked) spin_unlock_irqrestore(&vha->hw->hardware_lock, flags); diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h index cd6bdf71e533..bb03c022e023 100644 --- a/drivers/scsi/qla2xxx/qla_version.h +++ b/drivers/scsi/qla2xxx/qla_version.h @@ -7,7 +7,7 @@ /* * Driver version */ -#define QLA2XXX_VERSION "10.01.00.16-k" +#define QLA2XXX_VERSION "10.01.00.22-k" #define QLA_DRIVER_MAJOR_VER 10 #define QLA_DRIVER_MINOR_VER 1 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index d15412d3d9bd..abe7f79bb789 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -246,6 +246,8 @@ static void tcm_qla2xxx_complete_mcmd(struct work_struct *work) */ static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd) { + if (!mcmd) + return; INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd); queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work); } @@ -348,6 +350,7 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess) target_sess_cmd_list_set_waiting(se_sess); spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags); + sess->explicit_logout = 1; tcm_qla2xxx_put_sess(sess); } @@ -620,6 +623,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + struct scsi_qla_host *vha = cmd->vha; if (cmd->aborted) { /* Cmd can loop during Q-full. 
tcm_qla2xxx_aborted_task @@ -632,6 +636,7 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); + vha->hw->tgt.tgt_ops->free_cmd(cmd); return 0; } @@ -659,6 +664,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) { struct qla_tgt_cmd *cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); + struct scsi_qla_host *vha = cmd->vha; int xmit_type = QLA_TGT_XMIT_STATUS; if (cmd->aborted) { @@ -672,6 +678,7 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd) cmd, kref_read(&cmd->se_cmd.cmd_kref), cmd->se_cmd.transport_state, cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags); + vha->hw->tgt.tgt_ops->free_cmd(cmd); return 0; } cmd->bufflen = se_cmd->data_length; @@ -1136,9 +1143,8 @@ static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(struct se_wwn *wwn, /* * Expected to be called with struct qla_hw_data->tgt.sess_lock held */ -static struct fc_port *tcm_qla2xxx_find_sess_by_s_id( - scsi_qla_host_t *vha, - const uint8_t *s_id) +static struct fc_port *tcm_qla2xxx_find_sess_by_s_id(scsi_qla_host_t *vha, + const be_id_t s_id) { struct tcm_qla2xxx_lport *lport; struct se_node_acl *se_nacl; @@ -1181,7 +1187,7 @@ static void tcm_qla2xxx_set_sess_by_s_id( struct tcm_qla2xxx_nacl *nacl, struct se_session *se_sess, struct fc_port *fc_port, - uint8_t *s_id) + be_id_t s_id) { u32 key; void *slot; @@ -1348,14 +1354,9 @@ static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport, struct tcm_qla2xxx_nacl *nacl, struct fc_port *sess) { struct se_session *se_sess = sess->se_sess; - unsigned char be_sid[3]; - - be_sid[0] = sess->d_id.b.domain; - be_sid[1] = sess->d_id.b.area; - be_sid[2] = sess->d_id.b.al_pa; tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess, - sess, be_sid); + sess, port_id_to_be_id(sess->d_id)); tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess, sess, sess->loop_id); } @@ -1401,19 +1402,14 @@ static int tcm_qla2xxx_session_cb(struct se_portal_group *se_tpg, struct fc_port *qlat_sess = p; uint16_t loop_id = qlat_sess->loop_id; unsigned long flags; - unsigned char be_sid[3]; - - be_sid[0] = qlat_sess->d_id.b.domain; - be_sid[1] = qlat_sess->d_id.b.area; - be_sid[2] = qlat_sess->d_id.b.al_pa; /* * And now setup se_nacl and session pointers into HW lport internal * mappings for fabric S_ID and LOOP_ID. 
*/ spin_lock_irqsave(&ha->tgt.sess_lock, flags); - tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, - se_sess, qlat_sess, be_sid); + tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess, qlat_sess, + port_id_to_be_id(qlat_sess->d_id)); tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess, qlat_sess, loop_id); spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index dac9a7013208..02636b4785c5 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -640,9 +640,6 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != QLA_SUCCESS) { - dma_free_coherent(&ha->pdev->dev, - sizeof(struct addr_ctrl_blk), - init_fw_cb, init_fw_cb_dma); goto exit_init_fw_cb; } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 8c674eca09f1..5504ab11decc 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -4145,7 +4145,7 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, ha->queues_dma); - if (ha->fw_dump) + if (ha->fw_dump) vfree(ha->fw_dump); ha->queues_len = 0; @@ -4275,7 +4275,6 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) return QLA_SUCCESS; mem_alloc_error_exit: - qla4xxx_mem_free(ha); return QLA_ERROR; } diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 9335849f6bea..d539beef3ce8 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -200,10 +200,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f /* Write mailbox command registers. */ switch (mbox_param[param[0]] >> 4) { case 6: sbus_writew(param[5], qpti->qregs + MBOX5); + /* Fall through */ case 5: sbus_writew(param[4], qpti->qregs + MBOX4); + /* Fall through */ case 4: sbus_writew(param[3], qpti->qregs + MBOX3); + /* Fall through */ case 3: sbus_writew(param[2], qpti->qregs + MBOX2); + /* Fall through */ case 2: sbus_writew(param[1], qpti->qregs + MBOX1); + /* Fall through */ case 1: sbus_writew(param[0], qpti->qregs + MBOX0); } @@ -254,10 +259,15 @@ static int qlogicpti_mbox_command(struct qlogicpti *qpti, u_short param[], int f /* Read back output parameters. 
*/ switch (mbox_param[param[0]] & 0xf) { case 6: param[5] = sbus_readw(qpti->qregs + MBOX5); + /* Fall through */ case 5: param[4] = sbus_readw(qpti->qregs + MBOX4); + /* Fall through */ case 4: param[3] = sbus_readw(qpti->qregs + MBOX3); + /* Fall through */ case 3: param[2] = sbus_readw(qpti->qregs + MBOX2); + /* Fall through */ case 2: param[1] = sbus_readw(qpti->qregs + MBOX1); + /* Fall through */ case 1: param[0] = sbus_readw(qpti->qregs + MBOX0); } diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 1f5b5c8a7f72..930e4803d888 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -186,7 +186,7 @@ void scsi_finish_command(struct scsi_cmnd *cmd) struct scsi_driver *drv; unsigned int good_bytes; - scsi_device_unbusy(sdev); + scsi_device_unbusy(sdev, cmd); /* * Clear the flags that say that the device/target/host is no longer @@ -434,8 +434,8 @@ static void scsi_update_vpd_page(struct scsi_device *sdev, u8 page, return; mutex_lock(&sdev->inquiry_mutex); - rcu_swap_protected(*sdev_vpd_buf, vpd_buf, - lockdep_is_held(&sdev->inquiry_mutex)); + vpd_buf = rcu_replace_pointer(*sdev_vpd_buf, vpd_buf, + lockdep_is_held(&sdev->inquiry_mutex)); mutex_unlock(&sdev->inquiry_mutex); if (vpd_buf) @@ -465,10 +465,14 @@ void scsi_attach_vpd(struct scsi_device *sdev) return; for (i = 4; i < vpd_buf->len; i++) { + if (vpd_buf->data[i] == 0x0) + scsi_update_vpd_page(sdev, 0x0, &sdev->vpd_pg0); if (vpd_buf->data[i] == 0x80) scsi_update_vpd_page(sdev, 0x80, &sdev->vpd_pg80); if (vpd_buf->data[i] == 0x83) scsi_update_vpd_page(sdev, 0x83, &sdev->vpd_pg83); + if (vpd_buf->data[i] == 0x89) + scsi_update_vpd_page(sdev, 0x89, &sdev->vpd_pg89); } kfree(vpd_buf); } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index d323523f5f9d..44cb054d5e66 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1025,7 +1025,7 @@ static int fill_from_dev_buffer(struct scsi_cmnd *scp, unsigned char *arr, static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, int arr_len, unsigned int off_dst) { - int act_len, n; + unsigned int act_len, n; struct scsi_data_buffer *sdb = &scp->sdb; off_t skip = off_dst; @@ -1039,7 +1039,7 @@ static int p_fill_from_dev_buffer(struct scsi_cmnd *scp, const void *arr, pr_debug("%s: off_dst=%u, scsi_bufflen=%u, act_len=%u, resid=%d\n", __func__, off_dst, scsi_bufflen(scp), act_len, scsi_get_resid(scp)); - n = (int)scsi_bufflen(scp) - ((int)off_dst + act_len); + n = scsi_bufflen(scp) - (off_dst + act_len); scsi_set_resid(scp, min(scsi_get_resid(scp), n)); return 0; } @@ -5263,6 +5263,11 @@ static int __init scsi_debug_init(void) return -EINVAL; } + if (sdebug_num_tgts < 0) { + pr_err("num_tgts must be >= 0\n"); + return -EINVAL; + } + if (sdebug_guard > 1) { pr_err("guard must be 0 or 1\n"); return -EINVAL; diff --git a/drivers/scsi/scsi_debugfs.c b/drivers/scsi/scsi_debugfs.c index c5a8756384bc..c19ea7ab54cb 100644 --- a/drivers/scsi/scsi_debugfs.c +++ b/drivers/scsi/scsi_debugfs.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#include <linux/bitops.h> #include <linux/seq_file.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_dbg.h> @@ -18,9 +19,7 @@ static int scsi_flags_show(struct seq_file *m, const unsigned long flags, bool sep = false; int i; - for (i = 0; i < sizeof(flags) * BITS_PER_BYTE; i++) { - if (!(flags & BIT(i))) - continue; + for_each_set_bit(i, &flags, BITS_PER_LONG) { if (sep) seq_puts(m, "|"); sep = true; diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index 
df14597752ec..eed31021e788 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -736,13 +736,12 @@ out: return err; } -static const struct file_operations scsi_devinfo_proc_fops = { - .owner = THIS_MODULE, - .open = proc_scsi_devinfo_open, - .read = seq_read, - .write = proc_scsi_devinfo_write, - .llseek = seq_lseek, - .release = seq_release, +static const struct proc_ops scsi_devinfo_proc_ops = { + .proc_open = proc_scsi_devinfo_open, + .proc_read = seq_read, + .proc_write = proc_scsi_devinfo_write, + .proc_lseek = seq_lseek, + .proc_release = seq_release, }; #endif /* CONFIG_SCSI_PROC_FS */ @@ -867,7 +866,7 @@ int __init scsi_init_devinfo(void) } #ifdef CONFIG_SCSI_PROC_FS - p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops); + p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_ops); if (!p) { error = -ENOMEM; goto out; diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 1c470e31ae81..ae2fa170f6ad 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -967,6 +967,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, ses->data_direction = scmd->sc_data_direction; ses->sdb = scmd->sdb; ses->result = scmd->result; + ses->resid_len = scmd->req.resid_len; ses->underflow = scmd->underflow; ses->prot_op = scmd->prot_op; ses->eh_eflags = scmd->eh_eflags; @@ -977,6 +978,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses, memset(scmd->cmnd, 0, BLK_MAX_CDB); memset(&scmd->sdb, 0, sizeof(scmd->sdb)); scmd->result = 0; + scmd->req.resid_len = 0; if (sense_bytes) { scmd->sdb.length = min_t(unsigned, SCSI_SENSE_BUFFERSIZE, @@ -1029,6 +1031,7 @@ void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses) scmd->sc_data_direction = ses->data_direction; scmd->sdb = ses->sdb; scmd->result = ses->result; + scmd->req.resid_len = ses->resid_len; scmd->underflow = ses->underflow; scmd->prot_op = ses->prot_op; scmd->eh_eflags = ses->eh_eflags; diff --git a/drivers/scsi/scsi_ioctl.c b/drivers/scsi/scsi_ioctl.c index 57bcd05605bf..8f3af87b6bb0 100644 --- a/drivers/scsi/scsi_ioctl.c +++ b/drivers/scsi/scsi_ioctl.c @@ -189,17 +189,7 @@ static int scsi_ioctl_get_pci(struct scsi_device *sdev, void __user *arg) } -/** - * scsi_ioctl - Dispatch ioctl to scsi device - * @sdev: scsi device receiving ioctl - * @cmd: which ioctl is it - * @arg: data associated with ioctl - * - * Description: The scsi_ioctl() function differs from most ioctls in that it - * does not take a major/minor number as the dev field. Rather, it takes - * a pointer to a &struct scsi_device. - */ -int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +static int scsi_ioctl_common(struct scsi_device *sdev, int cmd, void __user *arg) { char scsi_cmd[MAX_COMMAND_SIZE]; struct scsi_sense_hdr sense_hdr; @@ -266,14 +256,50 @@ int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) return scsi_ioctl_get_pci(sdev, arg); case SG_SCSI_RESET: return scsi_ioctl_reset(sdev, arg); - default: - if (sdev->host->hostt->ioctl) - return sdev->host->hostt->ioctl(sdev, cmd, arg); } + return -ENOIOCTLCMD; +} + +/** + * scsi_ioctl - Dispatch ioctl to scsi device + * @sdev: scsi device receiving ioctl + * @cmd: which ioctl is it + * @arg: data associated with ioctl + * + * Description: The scsi_ioctl() function differs from most ioctls in that it + * does not take a major/minor number as the dev field. Rather, it takes + * a pointer to a &struct scsi_device. 
+ */ +int scsi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +{ + int ret = scsi_ioctl_common(sdev, cmd, arg); + + if (ret != -ENOIOCTLCMD) + return ret; + + if (sdev->host->hostt->ioctl) + return sdev->host->hostt->ioctl(sdev, cmd, arg); + return -EINVAL; } EXPORT_SYMBOL(scsi_ioctl); +#ifdef CONFIG_COMPAT +int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) +{ + int ret = scsi_ioctl_common(sdev, cmd, arg); + + if (ret != -ENOIOCTLCMD) + return ret; + + if (sdev->host->hostt->compat_ioctl) + return sdev->host->hostt->compat_ioctl(sdev, cmd, arg); + + return ret; +} +EXPORT_SYMBOL(scsi_compat_ioctl); +#endif + /* * We can process a reset even when a device isn't fully operable. */ diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 11e64b50497f..610ee41fa54c 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -189,7 +189,7 @@ static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) * active on the host/device. */ if (unbusy) - scsi_device_unbusy(device); + scsi_device_unbusy(device, cmd); /* * Requeue this command. It will go before all other commands @@ -321,20 +321,20 @@ static void scsi_init_cmd_errh(struct scsi_cmnd *cmd) } /* - * Decrement the host_busy counter and wake up the error handler if necessary. - * Avoid as follows that the error handler is not woken up if shost->host_busy - * == shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination - * with an RCU read lock in this function to ensure that this function in its - * entirety either finishes before scsi_eh_scmd_add() increases the + * Wake up the error handler if necessary. Avoid as follows that the error + * handler is not woken up if host in-flight requests number == + * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination + * with an RCU read lock in this function to ensure that this function in + * its entirety either finishes before scsi_eh_scmd_add() increases the * host_failed counter or that it notices the shost state change made by * scsi_eh_scmd_add(). 
*/ -static void scsi_dec_host_busy(struct Scsi_Host *shost) +static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) { unsigned long flags; rcu_read_lock(); - atomic_dec(&shost->host_busy); + __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state); if (unlikely(scsi_host_in_recovery(shost))) { spin_lock_irqsave(shost->host_lock, flags); if (shost->host_failed || shost->host_eh_scheduled) @@ -344,12 +344,12 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost) rcu_read_unlock(); } -void scsi_device_unbusy(struct scsi_device *sdev) +void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd) { struct Scsi_Host *shost = sdev->host; struct scsi_target *starget = scsi_target(sdev); - scsi_dec_host_busy(shost); + scsi_dec_host_busy(shost, cmd); if (starget->can_queue > 0) atomic_dec(&starget->target_busy); @@ -430,9 +430,6 @@ static inline bool scsi_target_is_busy(struct scsi_target *starget) static inline bool scsi_host_is_busy(struct Scsi_Host *shost) { - if (shost->can_queue > 0 && - atomic_read(&shost->host_busy) >= shost->can_queue) - return true; if (atomic_read(&shost->host_blocked) > 0) return true; if (shost->host_self_blocked) @@ -1089,6 +1086,18 @@ static void scsi_initialize_rq(struct request *rq) cmd->retries = 0; } +/* + * Only called when the request isn't completed by SCSI, and not freed by + * SCSI + */ +static void scsi_cleanup_rq(struct request *rq) +{ + if (rq->rq_flags & RQF_DONTPREP) { + scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq)); + rq->rq_flags &= ~RQF_DONTPREP; + } +} + /* Add a command to the list used by the aacraid and dpt_i2o drivers */ void scsi_add_cmd_to_list(struct scsi_cmnd *cmd) { @@ -1127,6 +1136,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) unsigned int flags = cmd->flags & SCMD_PRESERVED_FLAGS; unsigned long jiffies_at_alloc; int retries; + bool in_flight; if (!blk_rq_is_scsi(rq) && !(flags & SCMD_INITIALIZED)) { flags |= SCMD_INITIALIZED; @@ -1135,6 +1145,7 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) jiffies_at_alloc = cmd->jiffies_at_alloc; retries = cmd->retries; + in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state); /* zero out the cmd, except for the embedded scsi_request */ memset((char *)cmd + sizeof(cmd->req), 0, sizeof(*cmd) - sizeof(cmd->req) + dev->host->hostt->cmd_size); @@ -1146,6 +1157,8 @@ void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); cmd->jiffies_at_alloc = jiffies_at_alloc; cmd->retries = retries; + if (in_flight) + __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); scsi_add_cmd_to_list(cmd); } @@ -1355,16 +1368,14 @@ out_dec: */ static inline int scsi_host_queue_ready(struct request_queue *q, struct Scsi_Host *shost, - struct scsi_device *sdev) + struct scsi_device *sdev, + struct scsi_cmnd *cmd) { - unsigned int busy; - if (scsi_host_in_recovery(shost)) return 0; - busy = atomic_inc_return(&shost->host_busy) - 1; if (atomic_read(&shost->host_blocked) > 0) { - if (busy) + if (scsi_host_busy(shost) > 0) goto starved; /* @@ -1378,8 +1389,6 @@ static inline int scsi_host_queue_ready(struct request_queue *q, "unblocking host at zero depth\n")); } - if (shost->can_queue > 0 && busy >= shost->can_queue) - goto starved; if (shost->host_self_blocked) goto starved; @@ -1391,6 +1400,8 @@ static inline int scsi_host_queue_ready(struct request_queue *q, spin_unlock_irq(shost->host_lock); } + __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); + return 1; starved: @@ -1399,7 +1410,7 @@ 
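The scsi_lib.c hunks above remove the shost->host_busy atomic and instead track each command with an SCMD_STATE_INFLIGHT bit, set once scsi_host_queue_ready() accepts the command and cleared again in scsi_dec_host_busy(). A reduced model of that per-command accounting (the real code uses __set_bit()/__clear_bit() on cmd->state under RCU and derives the host busy count from blk-mq tags, e.g. via scsi_host_busy()):

/*
 * Reduced model: instead of one shared atomic counter, each command
 * carries an in-flight state bit; the busy count is derived by
 * iterating tagged commands rather than maintained globally.
 */
#include <stdbool.h>

enum { SCMD_STATE_COMPLETE, SCMD_STATE_INFLIGHT };

struct cmd_model {
	unsigned long state;
};

static inline void cmd_mark_inflight(struct cmd_model *c)
{
	c->state |= 1UL << SCMD_STATE_INFLIGHT;	/* host accepted the cmd */
}

static inline void cmd_clear_inflight(struct cmd_model *c)
{
	c->state &= ~(1UL << SCMD_STATE_INFLIGHT);	/* cmd completed */
}

static inline bool cmd_is_inflight(const struct cmd_model *c)
{
	return c->state & (1UL << SCMD_STATE_INFLIGHT);
}

This is why scsi_host_is_busy() above no longer compares a counter against can_queue: with per-command state, the hot path avoids a contended atomic shared by all queues.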
starved: list_add_tail(&sdev->starved_entry, &shost->starved_list); spin_unlock_irq(shost->host_lock); out_dec: - scsi_dec_host_busy(shost); + scsi_dec_host_busy(shost, cmd); return 0; } @@ -1653,7 +1664,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, ret = BLK_STS_RESOURCE; if (!scsi_target_queue_ready(shost, sdev)) goto out_put_budget; - if (!scsi_host_queue_ready(q, shost, sdev)) + if (!scsi_host_queue_ready(q, shost, sdev, cmd)) goto out_dec_target_busy; if (!(req->rq_flags & RQF_DONTPREP)) { @@ -1666,10 +1677,11 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(req); } + cmd->flags &= SCMD_PRESERVED_FLAGS; if (sdev->simple_tags) cmd->flags |= SCMD_TAGGED; - else - cmd->flags &= ~SCMD_TAGGED; + if (bd->last) + cmd->flags |= SCMD_LAST; scsi_init_cmd_errh(cmd); cmd->scsi_done = scsi_mq_done; @@ -1684,7 +1696,7 @@ static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, return BLK_STS_OK; out_dec_host_busy: - scsi_dec_host_busy(shost); + scsi_dec_host_busy(shost, cmd); out_dec_target_busy: if (scsi_target(sdev)->can_queue > 0) atomic_dec(&scsi_target(sdev)->target_busy); @@ -1809,10 +1821,38 @@ void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) } EXPORT_SYMBOL_GPL(__scsi_init_queue); +static const struct blk_mq_ops scsi_mq_ops_no_commit = { + .get_budget = scsi_mq_get_budget, + .put_budget = scsi_mq_put_budget, + .queue_rq = scsi_queue_rq, + .complete = scsi_softirq_done, + .timeout = scsi_timeout, +#ifdef CONFIG_BLK_DEBUG_FS + .show_rq = scsi_show_rq, +#endif + .init_request = scsi_mq_init_request, + .exit_request = scsi_mq_exit_request, + .initialize_rq_fn = scsi_initialize_rq, + .cleanup_rq = scsi_cleanup_rq, + .busy = scsi_mq_lld_busy, + .map_queues = scsi_map_queues, +}; + + +static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) +{ + struct request_queue *q = hctx->queue; + struct scsi_device *sdev = q->queuedata; + struct Scsi_Host *shost = sdev->host; + + shost->hostt->commit_rqs(shost, hctx->queue_num); +} + static const struct blk_mq_ops scsi_mq_ops = { .get_budget = scsi_mq_get_budget, .put_budget = scsi_mq_put_budget, .queue_rq = scsi_queue_rq, + .commit_rqs = scsi_commit_rqs, .complete = scsi_softirq_done, .timeout = scsi_timeout, #ifdef CONFIG_BLK_DEBUG_FS @@ -1821,6 +1861,7 @@ static const struct blk_mq_ops scsi_mq_ops = { .init_request = scsi_mq_init_request, .exit_request = scsi_mq_exit_request, .initialize_rq_fn = scsi_initialize_rq, + .cleanup_rq = scsi_cleanup_rq, .busy = scsi_mq_lld_busy, .map_queues = scsi_map_queues, }; @@ -1841,14 +1882,18 @@ int scsi_mq_setup_tags(struct Scsi_Host *shost) { unsigned int cmd_size, sgl_size; - sgl_size = scsi_mq_inline_sgl_size(shost); + sgl_size = max_t(unsigned int, sizeof(struct scatterlist), + scsi_mq_inline_sgl_size(shost)); cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; if (scsi_host_get_prot(shost)) cmd_size += sizeof(struct scsi_data_buffer) + sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT; memset(&shost->tag_set, 0, sizeof(shost->tag_set)); - shost->tag_set.ops = &scsi_mq_ops; + if (shost->hostt->commit_rqs) + shost->tag_set.ops = &scsi_mq_ops; + else + shost->tag_set.ops = &scsi_mq_ops_no_commit; shost->tag_set.nr_hw_queues = shost->nr_hw_queues ? 
: 1; shost->tag_set.queue_depth = shost->can_queue; shost->tag_set.cmd_size = cmd_size; @@ -1877,7 +1922,8 @@ struct scsi_device *scsi_device_from_queue(struct request_queue *q) { struct scsi_device *sdev = NULL; - if (q->mq_ops == &scsi_mq_ops) + if (q->mq_ops == &scsi_mq_ops_no_commit || + q->mq_ops == &scsi_mq_ops) sdev = q->queuedata; if (!sdev || !get_device(&sdev->sdev_gendev)) sdev = NULL; @@ -2062,6 +2108,8 @@ scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, memset(data, 0, sizeof(*data)); memset(&cmd[0], 0, 12); + + dbd = sdev->set_dbd_for_ms ? 8 : dbd; cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ cmd[2] = modepage; @@ -2678,6 +2726,14 @@ void scsi_start_queue(struct scsi_device *sdev) int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, enum scsi_device_state new_state) { + switch (new_state) { + case SDEV_RUNNING: + case SDEV_TRANSPORT_OFFLINE: + break; + default: + return -EINVAL; + } + /* * Try to transition the scsi device to SDEV_RUNNING or one of the * offlined states and goose the device queue if successful. @@ -2735,7 +2791,12 @@ static int scsi_internal_device_unblock(struct scsi_device *sdev, static void device_block(struct scsi_device *sdev, void *data) { - scsi_internal_device_block(sdev); + int ret; + + ret = scsi_internal_device_block(sdev); + + WARN_ONCE(ret, "scsi_internal_device_block(%s) failed: ret = %d\n", + dev_name(&sdev->sdev_gendev), ret); } static int diff --git a/drivers/scsi/scsi_logging.c b/drivers/scsi/scsi_logging.c index 39b8cc4574b4..c91fa3feb930 100644 --- a/drivers/scsi/scsi_logging.c +++ b/drivers/scsi/scsi_logging.c @@ -15,57 +15,15 @@ #include <scsi/scsi_eh.h> #include <scsi/scsi_dbg.h> -#define SCSI_LOG_SPOOLSIZE 4096 - -#if (SCSI_LOG_SPOOLSIZE / SCSI_LOG_BUFSIZE) > BITS_PER_LONG -#warning SCSI logging bitmask too large -#endif - -struct scsi_log_buf { - char buffer[SCSI_LOG_SPOOLSIZE]; - unsigned long map; -}; - -static DEFINE_PER_CPU(struct scsi_log_buf, scsi_format_log); - static char *scsi_log_reserve_buffer(size_t *len) { - struct scsi_log_buf *buf; - unsigned long map_bits = sizeof(buf->buffer) / SCSI_LOG_BUFSIZE; - unsigned long idx = 0; - - preempt_disable(); - buf = this_cpu_ptr(&scsi_format_log); - idx = find_first_zero_bit(&buf->map, map_bits); - if (likely(idx < map_bits)) { - while (test_and_set_bit(idx, &buf->map)) { - idx = find_next_zero_bit(&buf->map, map_bits, idx); - if (idx >= map_bits) - break; - } - } - if (WARN_ON(idx >= map_bits)) { - preempt_enable(); - return NULL; - } - *len = SCSI_LOG_BUFSIZE; - return buf->buffer + idx * SCSI_LOG_BUFSIZE; + *len = 128; + return kmalloc(*len, GFP_ATOMIC); } static void scsi_log_release_buffer(char *bufptr) { - struct scsi_log_buf *buf; - unsigned long idx; - int ret; - - buf = this_cpu_ptr(&scsi_format_log); - if (bufptr >= buf->buffer && - bufptr < buf->buffer + SCSI_LOG_SPOOLSIZE) { - idx = (bufptr - buf->buffer) / SCSI_LOG_BUFSIZE; - ret = test_and_clear_bit(idx, &buf->map); - WARN_ON(!ret); - } - preempt_enable(); + kfree(bufptr); } static inline const char *scmd_name(const struct scsi_cmnd *scmd) @@ -432,6 +390,7 @@ void scsi_print_result(const struct scsi_cmnd *cmd, const char *msg, const char *mlret_string = scsi_mlreturn_string(disposition); const char *hb_string = scsi_hostbyte_string(cmd->result); const char *db_string = scsi_driverbyte_string(cmd->result); + unsigned long cmd_age = (jiffies - cmd->jiffies_at_alloc) / HZ; logbuf = scsi_log_reserve_buffer(&logbuf_len); if (!logbuf) @@ -473,10 +432,15 @@ void scsi_print_result(const 
struct scsi_cmnd *cmd, const char *msg, if (db_string) off += scnprintf(logbuf + off, logbuf_len - off, - "driverbyte=%s", db_string); + "driverbyte=%s ", db_string); else off += scnprintf(logbuf + off, logbuf_len - off, - "driverbyte=0x%02x", driver_byte(cmd->result)); + "driverbyte=0x%02x ", + driver_byte(cmd->result)); + + off += scnprintf(logbuf + off, logbuf_len - off, + "cmd_age=%lus", cmd_age); + out_printk: dev_printk(KERN_INFO, &cmd->device->sdev_gendev, "%s", logbuf); scsi_log_release_buffer(logbuf); diff --git a/drivers/scsi/scsi_logging.h b/drivers/scsi/scsi_logging.h index 836185de28c4..3df877886119 100644 --- a/drivers/scsi/scsi_logging.h +++ b/drivers/scsi/scsi_logging.h @@ -53,7 +53,7 @@ do { \ } while (0) #else #define SCSI_LOG_LEVEL(SHIFT, BITS) 0 -#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) +#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do { } while (0) #endif /* CONFIG_SCSI_LOGGING */ /* diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c index 74ded5f3c236..3717eea37ecb 100644 --- a/drivers/scsi/scsi_pm.c +++ b/drivers/scsi/scsi_pm.c @@ -94,8 +94,7 @@ static int scsi_dev_type_resume(struct device *dev, if (!err && scsi_is_sdev_device(dev)) { struct scsi_device *sdev = to_scsi_device(dev); - if (sdev->request_queue->dev) - blk_set_runtime_active(sdev->request_queue); + blk_set_runtime_active(sdev->request_queue); } } diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h index cc2859d76d81..3bff9f7aa684 100644 --- a/drivers/scsi/scsi_priv.h +++ b/drivers/scsi/scsi_priv.h @@ -87,7 +87,7 @@ int scsi_noretry_cmd(struct scsi_cmnd *scmd); extern void scsi_add_cmd_to_list(struct scsi_cmnd *cmd); extern void scsi_del_cmd_from_list(struct scsi_cmnd *cmd); extern int scsi_maybe_unblock_host(struct scsi_device *sdev); -extern void scsi_device_unbusy(struct scsi_device *sdev); +extern void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd); extern void scsi_queue_insert(struct scsi_cmnd *cmd, int reason); extern void scsi_io_completion(struct scsi_cmnd *, unsigned int); extern void scsi_run_host_queues(struct Scsi_Host *shost); diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index c074631086a4..d6982d355739 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -83,12 +83,12 @@ static int proc_scsi_host_open(struct inode *inode, struct file *file) 4 * PAGE_SIZE); } -static const struct file_operations proc_scsi_fops = { - .open = proc_scsi_host_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, - .write = proc_scsi_host_write +static const struct proc_ops proc_scsi_ops = { + .proc_open = proc_scsi_host_open, + .proc_release = single_release, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = proc_scsi_host_write }; /** @@ -146,7 +146,7 @@ void scsi_proc_host_add(struct Scsi_Host *shost) sprintf(name,"%d", shost->host_no); p = proc_create_data(name, S_IRUGO | S_IWUSR, - sht->proc_dir, &proc_scsi_fops, shost); + sht->proc_dir, &proc_scsi_ops, shost); if (!p) printk(KERN_ERR "%s: Failed to register host %d in" "%s\n", __func__, shost->host_no, @@ -372,15 +372,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, return err; } -static int always_match(struct device *dev, const void *data) -{ - return 1; -} - static inline struct device *next_scsi_device(struct device *start) { - struct device *next = bus_find_device(&scsi_bus_type, start, NULL, - always_match); + struct device *next = bus_find_next_device(&scsi_bus_type, start); + 
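/* + * Like the bus_find_device(..., always_match) lookup it replaces, + * bus_find_next_device() takes a reference on the device it returns, + * so the reference on @start is still dropped here. + */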
put_device(start); return next; } @@ -441,13 +436,12 @@ static int proc_scsi_open(struct inode *inode, struct file *file) return seq_open(file, &scsi_seq_ops); } -static const struct file_operations proc_scsi_operations = { - .owner = THIS_MODULE, - .open = proc_scsi_open, - .read = seq_read, - .write = proc_scsi_write, - .llseek = seq_lseek, - .release = seq_release, +static const struct proc_ops scsi_scsi_proc_ops = { + .proc_open = proc_scsi_open, + .proc_read = seq_read, + .proc_write = proc_scsi_write, + .proc_lseek = seq_lseek, + .proc_release = seq_release, }; /** @@ -461,7 +455,7 @@ int __init scsi_init_procfs(void) if (!proc_scsi) goto err1; - pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations); + pde = proc_create("scsi/scsi", 0, NULL, &scsi_scsi_proc_ops); if (!pde) goto err2; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 64c96c7828ee..677b5c5403d2 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -437,6 +437,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) struct device *parent; struct list_head *this, *tmp; struct scsi_vpd *vpd_pg80 = NULL, *vpd_pg83 = NULL; + struct scsi_vpd *vpd_pg0 = NULL, *vpd_pg89 = NULL; unsigned long flags; sdev = container_of(work, struct scsi_device, ew.work); @@ -466,16 +467,24 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work) sdev->request_queue = NULL; mutex_lock(&sdev->inquiry_mutex); - rcu_swap_protected(sdev->vpd_pg80, vpd_pg80, - lockdep_is_held(&sdev->inquiry_mutex)); - rcu_swap_protected(sdev->vpd_pg83, vpd_pg83, - lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pg0 = rcu_replace_pointer(sdev->vpd_pg0, vpd_pg0, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pg80 = rcu_replace_pointer(sdev->vpd_pg80, vpd_pg80, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pg83 = rcu_replace_pointer(sdev->vpd_pg83, vpd_pg83, + lockdep_is_held(&sdev->inquiry_mutex)); + vpd_pg89 = rcu_replace_pointer(sdev->vpd_pg89, vpd_pg89, + lockdep_is_held(&sdev->inquiry_mutex)); mutex_unlock(&sdev->inquiry_mutex); + if (vpd_pg0) + kfree_rcu(vpd_pg0, rcu); if (vpd_pg83) kfree_rcu(vpd_pg83, rcu); if (vpd_pg80) kfree_rcu(vpd_pg80, rcu); + if (vpd_pg89) + kfree_rcu(vpd_pg89, rcu); kfree(sdev->inquiry); kfree(sdev); @@ -730,6 +739,14 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct kernfs_node *kn; + struct scsi_device *sdev = to_scsi_device(dev); + + /* + * Take a reference on the device so that the host driver module + * cannot be removed while the delete is in progress. + */ + if (scsi_device_get(sdev)) + return -ENODEV; kn = sysfs_break_active_protection(&dev->kobj, &attr->attr); WARN_ON_ONCE(!kn); @@ -744,9 +761,10 @@ sdev_store_delete(struct device *dev, struct device_attribute *attr, * state into SDEV_DEL.
*/ device_remove_file(dev, attr); - scsi_remove_device(to_scsi_device(dev)); + scsi_remove_device(sdev); if (kn) sysfs_unbreak_active_protection(kn); + scsi_device_put(sdev); return count; }; static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); @@ -859,6 +877,8 @@ static struct bin_attribute dev_attr_vpd_##_page = { \ sdev_vpd_pg_attr(pg83); sdev_vpd_pg_attr(pg80); +sdev_vpd_pg_attr(pg89); +sdev_vpd_pg_attr(pg0); static ssize_t show_inquiry(struct file *filep, struct kobject *kobj, struct bin_attribute *bin_attr, @@ -1191,12 +1211,18 @@ static umode_t scsi_sdev_bin_attr_is_visible(struct kobject *kobj, struct scsi_device *sdev = to_scsi_device(dev); + if (attr == &dev_attr_vpd_pg0 && !sdev->vpd_pg0) + return 0; + if (attr == &dev_attr_vpd_pg80 && !sdev->vpd_pg80) return 0; if (attr == &dev_attr_vpd_pg83 && !sdev->vpd_pg83) return 0; + if (attr == &dev_attr_vpd_pg89 && !sdev->vpd_pg89) + return 0; + return S_IRUGO; } @@ -1239,8 +1265,10 @@ static struct attribute *scsi_sdev_attrs[] = { }; static struct bin_attribute *scsi_sdev_bin_attrs[] = { + &dev_attr_vpd_pg0, &dev_attr_vpd_pg83, &dev_attr_vpd_pg80, + &dev_attr_vpd_pg89, &dev_attr_inquiry, NULL }; @@ -1300,7 +1328,8 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev) device_enable_async_suspend(&sdev->sdev_gendev); scsi_autopm_get_target(starget); pm_runtime_set_active(&sdev->sdev_gendev); - pm_runtime_forbid(&sdev->sdev_gendev); + if (!sdev->rpm_autosuspend) + pm_runtime_forbid(&sdev->sdev_gendev); pm_runtime_enable(&sdev->sdev_gendev); scsi_autopm_put_target(starget); diff --git a/drivers/scsi/scsi_trace.c b/drivers/scsi/scsi_trace.c index 0f17e7dac1b0..ac35c301c792 100644 --- a/drivers/scsi/scsi_trace.c +++ b/drivers/scsi/scsi_trace.c @@ -9,7 +9,7 @@ #include <trace/events/scsi.h> #define SERVICE_ACTION16(cdb) (cdb[1] & 0x1f) -#define SERVICE_ACTION32(cdb) ((cdb[8] << 8) | cdb[9]) +#define SERVICE_ACTION32(cdb) (get_unaligned_be16(&cdb[8])) static const char * scsi_trace_misc(struct trace_seq *, unsigned char *, int); @@ -18,15 +18,18 @@ static const char * scsi_trace_rw6(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; + u32 lba = 0, txlen; lba |= ((cdb[1] & 0x1F) << 16); lba |= (cdb[2] << 8); lba |= cdb[3]; - txlen = cdb[4]; + /* + * From SBC-2: a TRANSFER LENGTH field set to zero specifies that 256 + * logical blocks shall be read (READ(6)) or written (WRITE(6)). + */ + txlen = cdb[4] ? 
cdb[4] : 256; - trace_seq_printf(p, "lba=%llu txlen=%llu", - (unsigned long long)lba, (unsigned long long)txlen); + trace_seq_printf(p, "lba=%u txlen=%u", lba, txlen); trace_seq_putc(p, 0); return ret; @@ -36,17 +39,12 @@ static const char * scsi_trace_rw10(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; + u32 lba, txlen; - lba |= (cdb[2] << 24); - lba |= (cdb[3] << 16); - lba |= (cdb[4] << 8); - lba |= cdb[5]; - txlen |= (cdb[7] << 8); - txlen |= cdb[8]; + lba = get_unaligned_be32(&cdb[2]); + txlen = get_unaligned_be16(&cdb[7]); - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME) @@ -61,19 +59,12 @@ static const char * scsi_trace_rw12(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; - - lba |= (cdb[2] << 24); - lba |= (cdb[3] << 16); - lba |= (cdb[4] << 8); - lba |= cdb[5]; - txlen |= (cdb[6] << 24); - txlen |= (cdb[7] << 16); - txlen |= (cdb[8] << 8); - txlen |= cdb[9]; - - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + u32 lba, txlen; + + lba = get_unaligned_be32(&cdb[2]); + txlen = get_unaligned_be32(&cdb[6]); + + trace_seq_printf(p, "lba=%u txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); trace_seq_putc(p, 0); @@ -84,23 +75,13 @@ static const char * scsi_trace_rw16(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - sector_t lba = 0, txlen = 0; - - lba |= ((u64)cdb[2] << 56); - lba |= ((u64)cdb[3] << 48); - lba |= ((u64)cdb[4] << 40); - lba |= ((u64)cdb[5] << 32); - lba |= (cdb[6] << 24); - lba |= (cdb[7] << 16); - lba |= (cdb[8] << 8); - lba |= cdb[9]; - txlen |= (cdb[10] << 24); - txlen |= (cdb[11] << 16); - txlen |= (cdb[12] << 8); - txlen |= cdb[13]; - - trace_seq_printf(p, "lba=%llu txlen=%llu protect=%u", - (unsigned long long)lba, (unsigned long long)txlen, + u64 lba; + u32 txlen; + + lba = get_unaligned_be64(&cdb[2]); + txlen = get_unaligned_be32(&cdb[10]); + + trace_seq_printf(p, "lba=%llu txlen=%u protect=%u", lba, txlen, cdb[1] >> 5); if (cdb[0] == WRITE_SAME_16) @@ -115,8 +96,8 @@ static const char * scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; - sector_t lba = 0, txlen = 0; - u32 ei_lbrt = 0; + u64 lba; + u32 ei_lbrt, txlen; switch (SERVICE_ACTION32(cdb)) { case READ_32: @@ -136,26 +117,12 @@ scsi_trace_rw32(struct trace_seq *p, unsigned char *cdb, int len) goto out; } - lba |= ((u64)cdb[12] << 56); - lba |= ((u64)cdb[13] << 48); - lba |= ((u64)cdb[14] << 40); - lba |= ((u64)cdb[15] << 32); - lba |= (cdb[16] << 24); - lba |= (cdb[17] << 16); - lba |= (cdb[18] << 8); - lba |= cdb[19]; - ei_lbrt |= (cdb[20] << 24); - ei_lbrt |= (cdb[21] << 16); - ei_lbrt |= (cdb[22] << 8); - ei_lbrt |= cdb[23]; - txlen |= (cdb[28] << 24); - txlen |= (cdb[29] << 16); - txlen |= (cdb[30] << 8); - txlen |= cdb[31]; - - trace_seq_printf(p, "%s_32 lba=%llu txlen=%llu protect=%u ei_lbrt=%u", - cmd, (unsigned long long)lba, - (unsigned long long)txlen, cdb[10] >> 5, ei_lbrt); + lba = get_unaligned_be64(&cdb[12]); + ei_lbrt = get_unaligned_be32(&cdb[20]); + txlen = get_unaligned_be32(&cdb[28]); + + trace_seq_printf(p, "%s_32 lba=%llu txlen=%u protect=%u ei_lbrt=%u", + cmd, lba, txlen, cdb[10] >> 5, ei_lbrt); if 
(SERVICE_ACTION32(cdb) == WRITE_SAME_32) trace_seq_printf(p, " unmap=%u", cdb[10] >> 3 & 1); @@ -170,7 +137,7 @@ static const char * scsi_trace_unmap(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p); - unsigned int regions = cdb[7] << 8 | cdb[8]; + unsigned int regions = get_unaligned_be16(&cdb[7]); trace_seq_printf(p, "regions=%u", (regions - 8) / 16); trace_seq_putc(p, 0); @@ -182,8 +149,8 @@ static const char * scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) { const char *ret = trace_seq_buffer_ptr(p), *cmd; - sector_t lba = 0; - u32 alloc_len = 0; + u64 lba; + u32 alloc_len; switch (SERVICE_ACTION16(cdb)) { case SAI_READ_CAPACITY_16: @@ -197,21 +164,10 @@ scsi_trace_service_action_in(struct trace_seq *p, unsigned char *cdb, int len) goto out; } - lba |= ((u64)cdb[2] << 56); - lba |= ((u64)cdb[3] << 48); - lba |= ((u64)cdb[4] << 40); - lba |= ((u64)cdb[5] << 32); - lba |= (cdb[6] << 24); - lba |= (cdb[7] << 16); - lba |= (cdb[8] << 8); - lba |= cdb[9]; - alloc_len |= (cdb[10] << 24); - alloc_len |= (cdb[11] << 16); - alloc_len |= (cdb[12] << 8); - alloc_len |= cdb[13]; - - trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, - (unsigned long long)lba, alloc_len); + lba = get_unaligned_be64(&cdb[2]); + alloc_len = get_unaligned_be32(&cdb[10]); + + trace_seq_printf(p, "%s lba=%llu alloc_len=%u", cmd, lba, alloc_len); out: trace_seq_putc(p, 0); diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 417b868d8735..dfc726fa34e3 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -24,6 +24,8 @@ #define ISCSI_TRANSPORT_VERSION "2.0-870" +#define ISCSI_SEND_MAX_ALLOWED 10 + #define CREATE_TRACE_POINTS #include <trace/events/iscsi.h> @@ -2091,7 +2093,12 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) "could not register session's dev\n"); goto release_ida; } - transport_register_device(&session->dev); + err = transport_register_device(&session->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, + "could not register transport's dev\n"); + goto release_dev; + } spin_lock_irqsave(&sesslock, flags); list_add(&session->sess_list, &sesslist); @@ -2101,6 +2108,8 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); return 0; +release_dev: + device_del(&session->dev); release_ida: if (session->ida_used) ida_simple_remove(&iscsi_sess_ida, session->target_id); @@ -2261,7 +2270,12 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) "register connection's dev\n"); goto release_parent_ref; } - transport_register_device(&conn->dev); + err = transport_register_device(&conn->dev); + if (err) { + iscsi_cls_session_printk(KERN_ERR, session, + "could not register transport's dev\n"); + goto release_conn_ref; + } spin_lock_irqsave(&connlock, flags); list_add(&conn->conn_list, &connlist); @@ -2270,6 +2284,8 @@ iscsi_create_conn(struct iscsi_cls_session *session, int dd_size, uint32_t cid) ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n"); return conn; +release_conn_ref: + put_device(&conn->dev); release_parent_ref: put_device(&session->dev); free_conn: @@ -2945,6 +2961,24 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev) return err; } +static int iscsi_session_has_conns(int sid) +{ + struct iscsi_cls_conn *conn; + unsigned long flags; + int found = 0; +
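/* + * Walk the global connection list under connlock; any connection + * still bound to this session id means the session cannot be + * destroyed yet (the DESTROY_SESSION handler turns this into -EBUSY). + */ +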
spin_lock_irqsave(&connlock, flags); + list_for_each_entry(conn, &connlist, conn_list) { + if (iscsi_conn_get_sid(conn) == sid) { + found = 1; + break; + } + } + spin_unlock_irqrestore(&connlock, flags); + + return found; +} + static int iscsi_set_iface_params(struct iscsi_transport *transport, struct iscsi_uevent *ev, uint32_t len) @@ -3522,10 +3556,12 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) break; case ISCSI_UEVENT_DESTROY_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); - if (session) - transport->destroy_session(session); - else + if (!session) err = -EINVAL; + else if (iscsi_session_has_conns(ev->u.d_session.sid)) + err = -EBUSY; + else + transport->destroy_session(session); break; case ISCSI_UEVENT_UNBIND_SESSION: session = iscsi_session_lookup(ev->u.d_session.sid); @@ -3682,6 +3718,7 @@ iscsi_if_rx(struct sk_buff *skb) struct nlmsghdr *nlh; struct iscsi_uevent *ev; uint32_t group; + int retries = ISCSI_SEND_MAX_ALLOWED; nlh = nlmsg_hdr(skb); if (nlh->nlmsg_len < sizeof(*nlh) + sizeof(*ev) || @@ -3712,6 +3749,10 @@ iscsi_if_rx(struct sk_buff *skb) break; err = iscsi_if_send_reply(portid, nlh->nlmsg_type, ev, sizeof(*ev)); + if (err == -EAGAIN && --retries < 0) { + printk(KERN_WARNING "Send reply failed, error %d\n", err); + break; + } } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index ef138c57e2a6..182fd25c7c43 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1391,9 +1391,6 @@ static void sas_expander_release(struct device *dev) struct sas_rphy *rphy = dev_to_rphy(dev); struct sas_expander_device *edev = rphy_to_expander_device(rphy); - if (rphy->q) - blk_cleanup_queue(rphy->q); - put_device(dev->parent); kfree(edev); } @@ -1403,9 +1400,6 @@ static void sas_end_device_release(struct device *dev) struct sas_rphy *rphy = dev_to_rphy(dev); struct sas_end_device *edev = rphy_to_end_device(rphy); - if (rphy->q) - blk_cleanup_queue(rphy->q); - put_device(dev->parent); kfree(edev); } @@ -1634,8 +1628,7 @@ sas_rphy_remove(struct sas_rphy *rphy) } sas_rphy_unlink(rphy); - if (rphy->q) - bsg_unregister_queue(rphy->q); + bsg_remove_queue(rphy->q); transport_remove_device(dev); device_del(dev); } diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 149d406aacc9..8ca9299ffd36 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -122,8 +122,6 @@ static void sd_eh_reset(struct scsi_cmnd *); static int sd_eh_action(struct scsi_cmnd *, int); static void sd_read_capacity(struct scsi_disk *sdkp, unsigned char *buffer); static void scsi_disk_release(struct device *cdev); -static void sd_print_sense_hdr(struct scsi_disk *, struct scsi_sense_hdr *); -static void sd_print_result(const struct scsi_disk *, const char *, int); static DEFINE_IDA(sd_index_ida); @@ -1166,11 +1164,12 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) sector_t lba = sectors_to_logical(sdp, blk_rq_pos(rq)); sector_t threshold; unsigned int nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq)); - bool dif, dix; unsigned int mask = logical_to_sectors(sdp, 1) - 1; bool write = rq_data_dir(rq) == WRITE; unsigned char protect, fua; blk_status_t ret; + unsigned int dif; + bool dix; ret = scsi_init_io(cmd); if (ret != BLK_STS_OK) @@ -1211,9 +1210,6 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd) dix = scsi_prot_sg_count(cmd); dif = scsi_host_dif_capable(cmd->device->host, 
sdkp->protection_type); - if (write && dix) - t10_pi_prepare(cmd->request, sdkp->protection_type); - if (dif || dix) protect = sd_setup_protect_cmnd(cmd, dix, dif); else @@ -1293,7 +1289,17 @@ static blk_status_t sd_init_command(struct scsi_cmnd *cmd) case REQ_OP_WRITE: return sd_setup_read_write_cmnd(cmd); case REQ_OP_ZONE_RESET: - return sd_zbc_setup_reset_cmnd(cmd); + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, + false); + case REQ_OP_ZONE_RESET_ALL: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_RESET_WRITE_POINTER, + true); + case REQ_OP_ZONE_OPEN: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_OPEN_ZONE, false); + case REQ_OP_ZONE_CLOSE: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_CLOSE_ZONE, false); + case REQ_OP_ZONE_FINISH: + return sd_zbc_setup_zone_mgmt_cmnd(cmd, ZO_FINISH_ZONE, false); default: WARN_ON_ONCE(1); return BLK_STS_NOTSUPP; @@ -1459,13 +1465,12 @@ static int sd_getgeo(struct block_device *bdev, struct hd_geometry *geo) * Note: most ioctls are forward onto the block subsystem or further * down in the scsi subsystem. **/ -static int sd_ioctl(struct block_device *bdev, fmode_t mode, - unsigned int cmd, unsigned long arg) +static int sd_ioctl_common(struct block_device *bdev, fmode_t mode, + unsigned int cmd, void __user *p) { struct gendisk *disk = bdev->bd_disk; struct scsi_disk *sdkp = scsi_disk(disk); struct scsi_device *sdp = sdkp->device; - void __user *p = (void __user *)arg; int error; SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " @@ -1501,9 +1506,6 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode, break; default: error = scsi_cmd_blk_ioctl(bdev, mode, cmd, p); - if (error != -ENOTTY) - break; - error = scsi_ioctl(sdp, cmd, p); break; } out: @@ -1655,7 +1657,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) /* we need to evaluate the error return */ if (scsi_sense_valid(sshdr) && (sshdr->asc == 0x3a || /* medium not present */ - sshdr->asc == 0x20)) /* invalid command */ + sshdr->asc == 0x20 || /* invalid command */ + (sshdr->asc == 0x74 && sshdr->ascq == 0x71))) /* drive is password locked */ /* this is no error here */ return 0; @@ -1684,29 +1687,31 @@ static void sd_rescan(struct device *dev) revalidate_disk(sdkp->disk); } +static int sd_ioctl(struct block_device *bdev, fmode_t mode, + unsigned int cmd, unsigned long arg) +{ + void __user *p = (void __user *)arg; + int ret; + + ret = sd_ioctl_common(bdev, mode, cmd, p); + if (ret != -ENOTTY) + return ret; + + return scsi_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p); +} #ifdef CONFIG_COMPAT -/* - * This gets directly called from VFS. When the ioctl - * is not recognized we go back to the other translation paths. - */ static int sd_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { - struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device; - int error; + void __user *p = compat_ptr(arg); + int ret; - error = scsi_ioctl_block_when_processing_errors(sdev, cmd, - (mode & FMODE_NDELAY) != 0); - if (error) - return error; - - /* - * Let the static ioctl translation table take care of it. 
- */ - if (!sdev->host->hostt->compat_ioctl) - return -ENOIOCTLCMD; - return sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg); + ret = sd_ioctl_common(bdev, mode, cmd, p); + if (ret != -ENOTTY) + return ret; + + return scsi_compat_ioctl(scsi_disk(bdev->bd_disk)->device, cmd, p); } #endif @@ -1959,6 +1964,10 @@ static int sd_done(struct scsi_cmnd *SCpnt) case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE_SAME: case REQ_OP_ZONE_RESET: + case REQ_OP_ZONE_RESET_ALL: + case REQ_OP_ZONE_OPEN: + case REQ_OP_ZONE_CLOSE: + case REQ_OP_ZONE_FINISH: if (!result) { good_bytes = blk_rq_bytes(req); scsi_set_resid(SCpnt, 0); @@ -1978,6 +1987,7 @@ static int sd_done(struct scsi_cmnd *SCpnt) sd_printk(KERN_INFO, sdkp, "Unaligned partial completion (resid=%u, sector_sz=%u)\n", resid, sector_size); + scsi_print_command(SCpnt); resid = min(scsi_bufflen(SCpnt), round_up(resid, sector_size)); scsi_set_resid(SCpnt, resid); @@ -2051,11 +2061,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) "sd_done: completed %d of %d bytes\n", good_bytes, scsi_bufflen(SCpnt))); - if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt) && - good_bytes) - t10_pi_complete(SCpnt->request, sdkp->protection_type, - good_bytes / scsi_prot_interval(SCpnt)); - return good_bytes; } @@ -2194,8 +2199,10 @@ static int sd_read_protection_type(struct scsi_disk *sdkp, unsigned char *buffer u8 type; int ret = 0; - if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) + if (scsi_device_protection(sdp) == 0 || (buffer[12] & 1) == 0) { + sdkp->protection_type = 0; return ret; + } type = ((buffer[12] >> 1) & 7) + 1; /* P_TYPE 0 = Type 1 */ @@ -2939,15 +2946,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) q->limits.zoned = BLK_ZONED_HM; } else { sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1) + if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) { /* Host-aware */ q->limits.zoned = BLK_ZONED_HA; - else + } else { /* - * Treat drive-managed devices as - * regular block devices. + * Treat drive-managed devices and host-aware devices + * with partitions as regular block devices. */ q->limits.zoned = BLK_ZONED_NONE; + } } if (blk_queue_is_zoned(q) && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", @@ -3371,6 +3379,10 @@ static int sd_probe(struct device *dev) } blk_pm_runtime_init(sdp->request_queue, dev); + if (sdp->rpm_autosuspend) { + pm_runtime_set_autosuspend_delay(dev, + sdp->host->hostt->rpm_autosuspend_delay); + } device_add_disk(dev, gd, NULL); if (sdkp->capacity) sd_dif_config_host(sdkp); @@ -3703,15 +3715,13 @@ static void __exit exit_sd(void) module_init(init_sd); module_exit(exit_sd); -static void sd_print_sense_hdr(struct scsi_disk *sdkp, - struct scsi_sense_hdr *sshdr) +void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr) { scsi_print_sense_hdr(sdkp->device, sdkp->disk ? 
sdkp->disk->disk_name : NULL, sshdr); } -static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, - int result) +void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result) { const char *hb_string = scsi_hostbyte_string(result); const char *db_string = scsi_driverbyte_string(result); @@ -3726,4 +3736,3 @@ static void sd_print_result(const struct scsi_disk *sdkp, const char *msg, "%s: Result: hostbyte=0x%02x driverbyte=0x%02x\n", msg, host_byte(result), driver_byte(result)); } - diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 38c50946fc42..50fff0bf8c8e 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -209,11 +209,12 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp) extern int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer); extern void sd_zbc_print_zones(struct scsi_disk *sdkp); -extern blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd); +blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, + unsigned char op, bool all); extern void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr); -extern int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, - struct blk_zone *zones, unsigned int *nr_zones); +int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, + unsigned int nr_zones, report_zones_cb cb, void *data); #else /* CONFIG_BLK_DEV_ZONED */ @@ -225,7 +226,9 @@ static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, static inline void sd_zbc_print_zones(struct scsi_disk *sdkp) {} -static inline blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) +static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, + unsigned char op, + bool all) { return BLK_STS_TARGET; } @@ -238,4 +241,7 @@ static inline void sd_zbc_complete(struct scsi_cmnd *cmd, #endif /* CONFIG_BLK_DEV_ZONED */ +void sd_print_sense_hdr(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr); +void sd_print_result(const struct scsi_disk *sdkp, const char *msg, int result); + #endif /* _SCSI_DISK_H */ diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 5d6ff3931632..e4282bce5834 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -19,34 +19,27 @@ #include "sd.h" -/** - * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone, - * @sdkp: The disk the report originated from - * @buf: Address of the report zone descriptor - * @zone: the destination zone structure - * - * All LBA sized values are converted to 512B sectors unit. 
- */ -static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf, - struct blk_zone *zone) +static int sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf, + unsigned int idx, report_zones_cb cb, void *data) { struct scsi_device *sdp = sdkp->device; + struct blk_zone zone = { 0 }; - memset(zone, 0, sizeof(struct blk_zone)); - - zone->type = buf[0] & 0x0f; - zone->cond = (buf[1] >> 4) & 0xf; + zone.type = buf[0] & 0x0f; + zone.cond = (buf[1] >> 4) & 0xf; if (buf[1] & 0x01) - zone->reset = 1; + zone.reset = 1; if (buf[1] & 0x02) - zone->non_seq = 1; - - zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8])); - zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16])); - zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24])); - if (zone->type != ZBC_ZONE_TYPE_CONV && - zone->cond == ZBC_ZONE_COND_FULL) - zone->wp = zone->start + zone->len; + zone.non_seq = 1; + + zone.len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8])); + zone.start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16])); + zone.wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24])); + if (zone.type != ZBC_ZONE_TYPE_CONV && + zone.cond == ZBC_ZONE_COND_FULL) + zone.wp = zone.start + zone.len; + + return cb(&zone, idx, data); } /** @@ -87,9 +80,11 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf, timeout, SD_MAX_RETRIES, NULL); if (result) { sd_printk(KERN_ERR, sdkp, - "REPORT ZONES lba %llu failed with %d/%d\n", - (unsigned long long)lba, - host_byte(result), driver_byte(result)); + "REPORT ZONES start lba %llu failed\n", lba); + sd_print_result(sdkp, "REPORT ZONES", result); + if (driver_byte(result) == DRIVER_SENSE && + scsi_sense_valid(&sshdr)) + sd_print_sense_hdr(sdkp, &sshdr); return -EIO; } @@ -104,11 +99,6 @@ static int sd_zbc_do_report_zones(struct scsi_disk *sdkp, unsigned char *buf, return 0; } -/* - * Maximum number of zones to get with one report zones command. - */ -#define SD_ZBC_REPORT_MAX_ZONES 8192U - /** * Allocate a buffer for report zones reply. * @sdkp: The target disk @@ -138,81 +128,94 @@ static void *sd_zbc_alloc_report_buffer(struct scsi_disk *sdkp, * sure that the allocated buffer can always be mapped by limiting the * number of pages allocated to the HBA max segments limit. */ - nr_zones = min(nr_zones, SD_ZBC_REPORT_MAX_ZONES); - bufsize = roundup((nr_zones + 1) * 64, 512); + nr_zones = min(nr_zones, sdkp->nr_zones); + bufsize = roundup((nr_zones + 1) * 64, SECTOR_SIZE); bufsize = min_t(size_t, bufsize, queue_max_hw_sectors(q) << SECTOR_SHIFT); bufsize = min_t(size_t, bufsize, queue_max_segments(q) << PAGE_SHIFT); - buf = vzalloc(bufsize); - if (buf) - *buflen = bufsize; + while (bufsize >= SECTOR_SIZE) { + buf = __vmalloc(bufsize, + GFP_KERNEL | __GFP_ZERO | __GFP_NORETRY, + PAGE_KERNEL); + if (buf) { + *buflen = bufsize; + return buf; + } + bufsize >>= 1; + } - return buf; + return NULL; } /** - * sd_zbc_report_zones - Disk report zones operation. - * @disk: The target disk - * @sector: Start 512B sector of the report - * @zones: Array of zone descriptors - * @nr_zones: Number of descriptors in the array - * - * Execute a report zones command on the target disk. + * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors. 
+ * @sdkp: The target disk */ +static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp) +{ + return logical_to_sectors(sdkp->device, sdkp->zone_blocks); +} + int sd_zbc_report_zones(struct gendisk *disk, sector_t sector, - struct blk_zone *zones, unsigned int *nr_zones) + unsigned int nr_zones, report_zones_cb cb, void *data) { struct scsi_disk *sdkp = scsi_disk(disk); - unsigned int i, nrz = *nr_zones; + unsigned int nr, i; unsigned char *buf; - size_t buflen = 0, offset = 0; - int ret = 0; + size_t offset, buflen = 0; + int zone_idx = 0; + int ret; if (!sd_is_zoned(sdkp)) /* Not a zoned device */ return -EOPNOTSUPP; - buf = sd_zbc_alloc_report_buffer(sdkp, nrz, &buflen); + buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen); if (!buf) return -ENOMEM; - ret = sd_zbc_do_report_zones(sdkp, buf, buflen, - sectors_to_logical(sdkp->device, sector), true); - if (ret) - goto out; + while (zone_idx < nr_zones && sector < get_capacity(disk)) { + ret = sd_zbc_do_report_zones(sdkp, buf, buflen, + sectors_to_logical(sdkp->device, sector), true); + if (ret) + goto out; + + offset = 0; + nr = min(nr_zones, get_unaligned_be32(&buf[0]) / 64); + if (!nr) + break; + + for (i = 0; i < nr && zone_idx < nr_zones; i++) { + offset += 64; + ret = sd_zbc_parse_report(sdkp, buf + offset, zone_idx, + cb, data); + if (ret) + goto out; + zone_idx++; + } - nrz = min(nrz, get_unaligned_be32(&buf[0]) / 64); - for (i = 0; i < nrz; i++) { - offset += 64; - sd_zbc_parse_report(sdkp, buf + offset, zones); - zones++; + sector += sd_zbc_zone_sectors(sdkp) * i; } - *nr_zones = nrz; - + ret = zone_idx; out: kvfree(buf); - return ret; } /** - * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors. - * @sdkp: The target disk - */ -static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp) -{ - return logical_to_sectors(sdkp->device, sdkp->zone_blocks); -} - -/** - * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command. + * sd_zbc_setup_zone_mgmt_cmnd - Prepare a zone ZBC_OUT command. The operations + * can be RESET WRITE POINTER, OPEN, CLOSE or FINISH. * @cmd: the command to setup + * @op: Operation to be performed + * @all: All zones control * - * Called from sd_init_command() for a REQ_OP_ZONE_RESET request. + * Called from sd_init_command() for REQ_OP_ZONE_RESET, REQ_OP_ZONE_RESET_ALL, + * REQ_OP_ZONE_OPEN, REQ_OP_ZONE_CLOSE or REQ_OP_ZONE_FINISH requests. */ -blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) +blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, + unsigned char op, bool all) { struct request *rq = cmd->request; struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); @@ -233,8 +236,11 @@ blk_status_t sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd) cmd->cmd_len = 16; memset(cmd->cmnd, 0, cmd->cmd_len); cmd->cmnd[0] = ZBC_OUT; - cmd->cmnd[1] = ZO_RESET_WRITE_POINTER; - put_unaligned_be64(block, &cmd->cmnd[2]); + cmd->cmnd[1] = op; + if (all) + cmd->cmnd[14] = 0x1; + else + put_unaligned_be64(block, &cmd->cmnd[2]); rq->timeout = SD_TIMEOUT; cmd->sc_data_direction = DMA_NONE; @@ -259,24 +265,16 @@ void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, int result = cmd->result; struct request *rq = cmd->request; - switch (req_op(rq)) { - case REQ_OP_ZONE_RESET: - - if (result && - sshdr->sense_key == ILLEGAL_REQUEST && - sshdr->asc == 0x24) - /* - * INVALID FIELD IN CDB error: reset of a conventional - * zone was attempted. Nothing to worry about, so be - * quiet about the error. 
- */ - rq->rq_flags |= RQF_QUIET; - break; - - case REQ_OP_WRITE: - case REQ_OP_WRITE_ZEROES: - case REQ_OP_WRITE_SAME: - break; + if (op_is_zone_mgmt(req_op(rq)) && + result && + sshdr->sense_key == ILLEGAL_REQUEST && + sshdr->asc == 0x24) { + /* + * INVALID FIELD IN CDB error: a zone management command was + * attempted on a conventional zone. Nothing to worry about, + * so be quiet about the error. + */ + rq->rq_flags |= RQF_QUIET; } } @@ -327,44 +325,29 @@ static int sd_zbc_check_zoned_characteristics(struct scsi_disk *sdkp, } /** - * sd_zbc_check_zones - Check the device capacity and zone sizes + * sd_zbc_check_capacity - Check the device capacity * @sdkp: Target disk + * @buf: command buffer + * @zblock: zone size in number of blocks * - * Check that the device capacity as reported by READ CAPACITY matches the - * max_lba value (plus one)of the report zones command reply. Also check that - * all zones of the device have an equal size, only allowing the last zone of - * the disk to have a smaller size (runt zone). The zone size must also be a - * power of two. + * Get the device zone size and check that the device capacity as reported + * by READ CAPACITY matches the max_lba value (plus one) of the report zones + * command reply for devices with RC_BASIS == 0. * - * Returns the zone size in number of blocks upon success or an error code - * upon failure. + * Returns 0 upon success or an error code upon failure. */ -static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks) +static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf, + u32 *zblocks) { - size_t bufsize, buflen; - unsigned int noio_flag; - u64 zone_blocks = 0; - sector_t max_lba, block = 0; - unsigned char *buf; + u64 zone_blocks; + sector_t max_lba; unsigned char *rec; int ret; - u8 same; - - /* Do all memory allocations as if GFP_NOIO was specified */ - noio_flag = memalloc_noio_save(); - - /* Get a buffer */ - buf = sd_zbc_alloc_report_buffer(sdkp, SD_ZBC_REPORT_MAX_ZONES, - &bufsize); - if (!buf) { - ret = -ENOMEM; - goto out; - } - /* Do a report zone to get max_lba and the same field */ - ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, 0, false); + /* Do a report zone to get max_lba and the size of the first zone */ + ret = sd_zbc_do_report_zones(sdkp, buf, SD_BUF_SIZE, 0, false); if (ret) - goto out_free; + return ret; if (sdkp->rc_basis == 0) { /* The max_lba field is the capacity of this device */ @@ -379,82 +362,19 @@ static int sd_zbc_check_zones(struct scsi_disk *sdkp, u32 *zblocks) } } - /* - * Check same field: for any value other than 0, we know that all zones - * have the same size. - */ - same = buf[4] & 0x0f; - if (same > 0) { - rec = &buf[64]; - zone_blocks = get_unaligned_be64(&rec[8]); - goto out; - } - - /* - * Check the size of all zones: all zones must be of - * equal size, except the last zone which can be smaller - * than other zones. 
- */ - do { - - /* Parse REPORT ZONES header */ - buflen = min_t(size_t, get_unaligned_be32(&buf[0]) + 64, - bufsize); - rec = buf + 64; - - /* Parse zone descriptors */ - while (rec < buf + buflen) { - u64 this_zone_blocks = get_unaligned_be64(&rec[8]); - - if (zone_blocks == 0) { - zone_blocks = this_zone_blocks; - } else if (this_zone_blocks != zone_blocks && - (block + this_zone_blocks < sdkp->capacity - || this_zone_blocks > zone_blocks)) { - zone_blocks = 0; - goto out; - } - block += this_zone_blocks; - rec += 64; - } - - if (block < sdkp->capacity) { - ret = sd_zbc_do_report_zones(sdkp, buf, bufsize, block, - true); - if (ret) - goto out_free; - } - - } while (block < sdkp->capacity); - -out: - if (!zone_blocks) { - if (sdkp->first_scan) - sd_printk(KERN_NOTICE, sdkp, - "Devices with non constant zone " - "size are not supported\n"); - ret = -ENODEV; - } else if (!is_power_of_2(zone_blocks)) { - if (sdkp->first_scan) - sd_printk(KERN_NOTICE, sdkp, - "Devices with non power of 2 zone " - "size are not supported\n"); - ret = -ENODEV; - } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { + /* Get the size of the first reported zone */ + rec = buf + 64; + zone_blocks = get_unaligned_be64(&rec[8]); + if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) { if (sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Zone size too large\n"); - ret = -EFBIG; - } else { - *zblocks = zone_blocks; - ret = 0; + return -EFBIG; } -out_free: - memalloc_noio_restore(noio_flag); - kvfree(buf); + *zblocks = zone_blocks; - return ret; + return 0; } int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) @@ -476,17 +396,15 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf) if (ret) goto err; - /* - * Check zone size: only devices with a constant zone size (except - * an eventual last runt zone) that is a power of 2 are supported. 
- */ - ret = sd_zbc_check_zones(sdkp, &zone_blocks); + /* Check the device capacity reported by report zones */ + ret = sd_zbc_check_capacity(sdkp, buf, &zone_blocks); if (ret != 0) goto err; /* The drive satisfies the kernel restrictions: set it up */ - blk_queue_chunk_sectors(sdkp->disk->queue, - logical_to_sectors(sdkp->device, zone_blocks)); + blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, sdkp->disk->queue); + blk_queue_required_elevator_features(sdkp->disk->queue, + ELEVATOR_F_ZBD_SEQ_WRITE); nr_zones = round_up(sdkp->capacity, zone_blocks) >> ilog2(zone_blocks); /* READ16/WRITE16 is mandatory for ZBC disks */ diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index cce757506383..4e6af592f018 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -405,6 +405,38 @@ sg_release(struct inode *inode, struct file *filp) return 0; } +static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count) +{ + struct sg_header __user *old_hdr = buf; + int reply_len; + + if (count >= SZ_SG_HEADER) { + /* negative reply_len means v3 format, otherwise v1/v2 */ + if (get_user(reply_len, &old_hdr->reply_len)) + return -EFAULT; + + if (reply_len >= 0) + return get_user(*pack_id, &old_hdr->pack_id); + + if (in_compat_syscall() && + count >= sizeof(struct compat_sg_io_hdr)) { + struct compat_sg_io_hdr __user *hp = buf; + + return get_user(*pack_id, &hp->pack_id); + } + + if (count >= sizeof(struct sg_io_hdr)) { + struct sg_io_hdr __user *hp = buf; + + return get_user(*pack_id, &hp->pack_id); + } + } + + /* no valid header was passed, so ignore the pack_id */ + *pack_id = -1; + return 0; +} + static ssize_t sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) { @@ -413,8 +445,8 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) Sg_request *srp; int req_pack_id = -1; sg_io_hdr_t *hp; - struct sg_header *old_hdr = NULL; - int retval = 0; + struct sg_header *old_hdr; + int retval; /* * This could cause a response to be stranded. 
Close the associated @@ -429,72 +461,34 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_read: count=%d\n", (int) count)); - if (!access_ok(buf, count)) - return -EFAULT; - if (sfp->force_packid && (count >= SZ_SG_HEADER)) { - old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); - if (!old_hdr) - return -ENOMEM; - if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) { - retval = -EFAULT; - goto free_old_hdr; - } - if (old_hdr->reply_len < 0) { - if (count >= SZ_SG_IO_HDR) { - sg_io_hdr_t *new_hdr; - new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL); - if (!new_hdr) { - retval = -ENOMEM; - goto free_old_hdr; - } - retval =__copy_from_user - (new_hdr, buf, SZ_SG_IO_HDR); - req_pack_id = new_hdr->pack_id; - kfree(new_hdr); - if (retval) { - retval = -EFAULT; - goto free_old_hdr; - } - } - } else - req_pack_id = old_hdr->pack_id; - } + if (sfp->force_packid) { + retval = get_sg_io_pack_id(&req_pack_id, buf, count); + if (retval) + return retval; + } + srp = sg_get_rq_mark(sfp, req_pack_id); if (!srp) { /* now wait on packet to arrive */ - if (atomic_read(&sdp->detaching)) { - retval = -ENODEV; - goto free_old_hdr; - } - if (filp->f_flags & O_NONBLOCK) { - retval = -EAGAIN; - goto free_old_hdr; - } + if (atomic_read(&sdp->detaching)) + return -ENODEV; + if (filp->f_flags & O_NONBLOCK) + return -EAGAIN; retval = wait_event_interruptible(sfp->read_wait, (atomic_read(&sdp->detaching) || (srp = sg_get_rq_mark(sfp, req_pack_id)))); - if (atomic_read(&sdp->detaching)) { - retval = -ENODEV; - goto free_old_hdr; - } - if (retval) { + if (atomic_read(&sdp->detaching)) + return -ENODEV; + if (retval) /* -ERESTARTSYS as signal hit process */ - goto free_old_hdr; - } - } - if (srp->header.interface_id != '\0') { - retval = sg_new_read(sfp, buf, count, srp); - goto free_old_hdr; + return retval; } + if (srp->header.interface_id != '\0') + return sg_new_read(sfp, buf, count, srp); hp = &srp->header; - if (old_hdr == NULL) { - old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); - if (! old_hdr) { - retval = -ENOMEM; - goto free_old_hdr; - } - } - memset(old_hdr, 0, SZ_SG_HEADER); + old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL); + if (!old_hdr) + return -ENOMEM; + old_hdr->reply_len = (int) hp->timeout; old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ old_hdr->pack_id = hp->pack_id; @@ -538,7 +532,7 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) /* Now copy the result back to the user buffer.
*/ if (count >= SZ_SG_HEADER) { - if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { + if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } @@ -568,7 +562,12 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) int err = 0, err2; int len; - if (count < SZ_SG_IO_HDR) { + if (in_compat_syscall()) { + if (count < sizeof(struct compat_sg_io_hdr)) { + err = -EINVAL; + goto err_out; + } + } else if (count < SZ_SG_IO_HDR) { err = -EINVAL; goto err_out; } @@ -589,10 +588,7 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) } if (hp->masked_status || hp->host_status || hp->driver_status) hp->info |= SG_INFO_CHECK; - if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) { - err = -EFAULT; - goto err_out; - } + err = put_sg_io_hdr(hp, buf); err_out: err2 = sg_finish_rem_req(srp); sg_remove_request(sfp, srp); @@ -627,11 +623,9 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) scsi_block_when_processing_errors(sdp->device))) return -ENXIO; - if (!access_ok(buf, count)) - return -EFAULT; /* protects following copy_from_user()s + get_user()s */ if (count < SZ_SG_HEADER) return -EIO; - if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) + if (copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) return -EFAULT; blocking = !(filp->f_flags & O_NONBLOCK); if (old_hdr.reply_len < 0) @@ -640,13 +634,15 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) if (count < (SZ_SG_HEADER + 6)) return -EIO; /* The minimum scsi command length is 6 bytes. */ + buf += SZ_SG_HEADER; + if (get_user(opcode, buf)) + return -EFAULT; + if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, "sg_write: queue full\n")); return -EDOM; } - buf += SZ_SG_HEADER; - __get_user(opcode, buf); mutex_lock(&sfp->f_mutex); if (sfp->next_cmd_len > 0) { cmd_size = sfp->next_cmd_len; @@ -689,7 +685,7 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) hp->flags = input_size; /* structure abuse ... 
*/ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; - if (__copy_from_user(cmnd, buf, cmd_size)) + if (copy_from_user(cmnd, buf, cmd_size)) return -EFAULT; /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, @@ -724,8 +720,6 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, if (count < SZ_SG_IO_HDR) return -EINVAL; - if (!access_ok(buf, count)) - return -EFAULT; /* protects following copy_from_user()s + get_user()s */ sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ if (!(srp = sg_add_request(sfp))) { @@ -735,7 +729,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, } srp->sg_io_owned = sg_io_owned; hp = &srp->header; - if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { + if (get_sg_io_hdr(hp, buf)) { sg_remove_request(sfp, srp); return -EFAULT; } @@ -763,11 +757,7 @@ sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, sg_remove_request(sfp, srp); return -EMSGSIZE; } - if (!access_ok(hp->cmdp, hp->cmd_len)) { - sg_remove_request(sfp, srp); - return -EFAULT; /* protects following copy_from_user()s + get_user()s */ - } - if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { + if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; } @@ -893,20 +883,42 @@ sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) } } +#ifdef CONFIG_COMPAT +struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ + char req_state; + char orphan; + char sg_io_owned; + char problem; + int pack_id; + compat_uptr_t usr_ptr; + unsigned int duration; + int unused; +}; + +static int put_compat_request_table(struct compat_sg_req_info __user *o, + struct sg_req_info *rinfo) +{ + int i; + for (i = 0; i < SG_MAX_QUEUE; i++) { + if (copy_to_user(o + i, rinfo + i, offsetof(sg_req_info_t, usr_ptr)) || + put_user((uintptr_t)rinfo[i].usr_ptr, &o[i].usr_ptr) || + put_user(rinfo[i].duration, &o[i].duration) || + put_user(rinfo[i].unused, &o[i].unused)) + return -EFAULT; + } + return 0; +} +#endif + static long -sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) +sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp, + unsigned int cmd_in, void __user *p) { - void __user *p = (void __user *)arg; int __user *ip = p; int result, val, read_only; - Sg_device *sdp; - Sg_fd *sfp; Sg_request *srp; unsigned long iflags; - if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) - return -ENXIO; - SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_ioctl: cmd=0x%x\n", (int) cmd_in)); read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); @@ -917,8 +929,6 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) return -ENODEV; if (!scsi_block_when_processing_errors(sdp->device)) return -ENXIO; - if (!access_ok(p, SZ_SG_IO_HDR)) - return -EFAULT; result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 1, read_only, 1, &srp); if (result < 0) @@ -963,26 +973,21 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) case SG_GET_LOW_DMA: return put_user((int) sdp->device->host->unchecked_isa_dma, ip); case SG_GET_SCSI_ID: - if (!access_ok(p, sizeof (sg_scsi_id_t))) - return -EFAULT; - else { - sg_scsi_id_t __user *sg_idp = p; + { + sg_scsi_id_t v; if (atomic_read(&sdp->detaching)) return -ENODEV; - __put_user((int) sdp->device->host->host_no, - &sg_idp->host_no); - __put_user((int) sdp->device->channel, - &sg_idp->channel); - __put_user((int) sdp->device->id, &sg_idp->scsi_id); - __put_user((int) sdp->device->lun, &sg_idp->lun); - 
__put_user((int) sdp->device->type, &sg_idp->scsi_type); - __put_user((short) sdp->device->host->cmd_per_lun, - &sg_idp->h_cmd_per_lun); - __put_user((short) sdp->device->queue_depth, - &sg_idp->d_queue_depth); - __put_user(0, &sg_idp->unused[0]); - __put_user(0, &sg_idp->unused[1]); + memset(&v, 0, sizeof(v)); + v.host_no = sdp->device->host->host_no; + v.channel = sdp->device->channel; + v.scsi_id = sdp->device->id; + v.lun = sdp->device->lun; + v.scsi_type = sdp->device->type; + v.h_cmd_per_lun = sdp->device->host->cmd_per_lun; + v.d_queue_depth = sdp->device->queue_depth; + if (copy_to_user(p, &v, sizeof(sg_scsi_id_t))) + return -EFAULT; return 0; } case SG_SET_FORCE_PACK_ID: @@ -992,20 +997,16 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) sfp->force_packid = val ? 1 : 0; return 0; case SG_GET_PACK_ID: - if (!access_ok(ip, sizeof (int))) - return -EFAULT; read_lock_irqsave(&sfp->rq_list_lock, iflags); list_for_each_entry(srp, &sfp->rq_list, entry) { if ((1 == srp->done) && (!srp->sg_io_owned)) { read_unlock_irqrestore(&sfp->rq_list_lock, iflags); - __put_user(srp->header.pack_id, ip); - return 0; + return put_user(srp->header.pack_id, ip); } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); - __put_user(-1, ip); - return 0; + return put_user(-1, ip); case SG_GET_NUM_WAITING: read_lock_irqsave(&sfp->rq_list_lock, iflags); val = 0; @@ -1073,9 +1074,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) val = (sdp->device ? 1 : 0); return put_user(val, ip); case SG_GET_REQUEST_TABLE: - if (!access_ok(p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) - return -EFAULT; - else { + { sg_req_info_t *rinfo; rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO, @@ -1085,8 +1084,13 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) read_lock_irqsave(&sfp->rq_list_lock, iflags); sg_fill_request_table(sfp, rinfo); read_unlock_irqrestore(&sfp->rq_list_lock, iflags); - result = __copy_to_user(p, rinfo, - SZ_SG_REQ_INFO * SG_MAX_QUEUE); + #ifdef CONFIG_COMPAT + if (in_compat_syscall()) + result = put_compat_request_table(p, rinfo); + else + #endif + result = copy_to_user(p, rinfo, + SZ_SG_REQ_INFO * SG_MAX_QUEUE); result = result ? 
-EFAULT : 0; kfree(rinfo); return result; @@ -1137,29 +1141,44 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) cmd_in, filp->f_flags & O_NDELAY); if (result) return result; + + return -ENOIOCTLCMD; +} + +static long +sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) +{ + void __user *p = (void __user *)arg; + Sg_device *sdp; + Sg_fd *sfp; + int ret; + + if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) + return -ENXIO; + + ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); + if (ret != -ENOIOCTLCMD) + return ret; + return scsi_ioctl(sdp->device, cmd_in, p); } #ifdef CONFIG_COMPAT static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { + void __user *p = compat_ptr(arg); Sg_device *sdp; Sg_fd *sfp; - struct scsi_device *sdev; + int ret; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; - sdev = sdp->device; - if (sdev->host->hostt->compat_ioctl) { - int ret; - - ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); - + ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); + if (ret != -ENOIOCTLCMD) return ret; - } - - return -ENOIOCTLCMD; + + return scsi_compat_ioctl(sdp->device, cmd_in, p); } #endif @@ -1797,7 +1816,14 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) struct iovec *iov = NULL; struct iov_iter i; - res = import_iovec(rw, hp->dxferp, iov_count, 0, &iov, &i); +#ifdef CONFIG_COMPAT + if (in_compat_syscall()) + res = compat_import_iovec(rw, hp->dxferp, iov_count, + 0, &iov, &i); + else +#endif + res = import_iovec(rw, hp->dxferp, iov_count, + 0, &iov, &i); if (res < 0) return res; @@ -1984,12 +2010,12 @@ sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) num = 1 << (PAGE_SHIFT + schp->page_order); for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { if (num > num_read_xfer) { - if (__copy_to_user(outp, page_address(schp->pages[k]), + if (copy_to_user(outp, page_address(schp->pages[k]), num_read_xfer)) return -EFAULT; break; } else { - if (__copy_to_user(outp, page_address(schp->pages[k]), + if (copy_to_user(outp, page_address(schp->pages[k]), num)) return -EFAULT; num_read_xfer -= num; @@ -2296,25 +2322,23 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v); static int sg_proc_single_open_adio(struct inode *inode, struct file *file); static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off); -static const struct file_operations adio_fops = { - .owner = THIS_MODULE, - .open = sg_proc_single_open_adio, - .read = seq_read, - .llseek = seq_lseek, - .write = sg_proc_write_adio, - .release = single_release, +static const struct proc_ops adio_proc_ops = { + .proc_open = sg_proc_single_open_adio, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = sg_proc_write_adio, + .proc_release = single_release, }; static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); static ssize_t sg_proc_write_dressz(struct file *filp, const char __user *buffer, size_t count, loff_t *off); -static const struct file_operations dressz_fops = { - .owner = THIS_MODULE, - .open = sg_proc_single_open_dressz, - .read = seq_read, - .llseek = seq_lseek, - .write = sg_proc_write_dressz, - .release = single_release, +static const struct proc_ops dressz_proc_ops = { + .proc_open = sg_proc_single_open_dressz, + .proc_read = seq_read, + .proc_lseek = seq_lseek, + .proc_write = sg_proc_write_dressz, + .proc_release = single_release, }; static int 
sg_proc_seq_show_version(struct seq_file *s, void *v); @@ -2355,9 +2379,9 @@ sg_proc_init(void) if (!p) return 1; - proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_fops); + proc_create("allow_dio", S_IRUGO | S_IWUSR, p, &adio_proc_ops); proc_create_seq("debug", S_IRUGO, p, &debug_seq_ops); - proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_fops); + proc_create("def_reserved_size", S_IRUGO | S_IWUSR, p, &dressz_proc_ops); proc_create_single("device_hdr", S_IRUGO, p, sg_proc_seq_show_devhdr); proc_create_seq("devices", S_IRUGO, p, &dev_seq_ops); proc_create_seq("device_strs", S_IRUGO, p, &devstrs_seq_ops); diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig index 97e159c2cecd..bc6506884e3b 100644 --- a/drivers/scsi/smartpqi/Kconfig +++ b/drivers/scsi/smartpqi/Kconfig @@ -1,6 +1,8 @@ # # Kernel configuration file for the SMARTPQI # +# Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries +# Copyright (c) 2017-2018 Microsemi Corporation # Copyright (c) 2016 Microsemi Corporation # Copyright (c) 2016 PMC-Sierra, Inc. # (mailto:esc.storagedev@microsemi.com) diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h index e8e768849c70..1129fe7a27ed 100644 --- a/drivers/scsi/smartpqi/smartpqi.h +++ b/drivers/scsi/smartpqi/smartpqi.h @@ -276,7 +276,9 @@ struct pqi_raid_path_request { u8 reserved4 : 2; u8 additional_cdb_bytes_usage : 3; u8 reserved5 : 3; - u8 cdb[32]; + u8 cdb[16]; + u8 reserved6[12]; + __le32 timeout; struct pqi_sg_descriptor sg_descriptors[PQI_MAX_EMBEDDED_SG_DESCRIPTORS]; }; @@ -385,7 +387,8 @@ struct pqi_task_management_request { struct pqi_iu_header header; __le16 request_id; __le16 nexus_id; - u8 reserved[4]; + u8 reserved[2]; + __le16 timeout; u8 lun_number[8]; __le16 protocol_specific; __le16 outbound_queue_id_to_manage; @@ -445,7 +448,7 @@ struct pqi_vendor_general_response { struct pqi_ofa_memory { __le64 signature; /* "OFA_QRM" */ - __le16 version; /* version of this struct(1 = 1st version) */ + __le16 version; /* version of this struct (1 = 1st version) */ u8 reserved[62]; __le32 bytes_allocated; /* total allocated memory in bytes */ __le16 num_memory_descriptors; @@ -761,6 +764,8 @@ struct pqi_config_table_firmware_features { #define PQI_FIRMWARE_FEATURE_OFA 0 #define PQI_FIRMWARE_FEATURE_SMP 1 #define PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE 11 +#define PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT 13 +#define PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT 14 struct pqi_config_table_debug { struct pqi_config_table_section_header header; @@ -822,13 +827,21 @@ union pqi_reset_register { #define PQI_HBA_BUS 2 #define PQI_EXTERNAL_RAID_VOLUME_BUS 3 #define PQI_MAX_BUS PQI_EXTERNAL_RAID_VOLUME_BUS +#define PQI_VSEP_CISS_BTL 379 struct report_lun_header { __be32 list_length; - u8 extended_response; + u8 flags; u8 reserved[3]; }; +/* for flags field of struct report_lun_header */ +#define CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID (1 << 0) +#define CISS_REPORT_LOG_FLAG_QUEUE_DEPTH (1 << 5) +#define CISS_REPORT_LOG_FLAG_DRIVE_TYPE_MIX (1 << 6) + +#define CISS_REPORT_PHYS_FLAG_OTHER (1 << 1) + struct report_log_lun_extended_entry { u8 lunid[8]; u8 volume_id[16]; @@ -850,7 +863,7 @@ struct report_phys_lun_extended_entry { }; /* for device_flags field of struct report_phys_lun_extended_entry */ -#define REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED 0x8 +#define CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED 0x8 struct report_phys_lun_extended { struct report_lun_header header; @@ -863,7 +876,7 @@ struct raid_map_disk_data { u8 reserved[2]; }; -/* 
constants for flags field of RAID map */ +/* for flags field of RAID map */ #define RAID_MAP_ENCRYPTION_ENABLED 0x1 struct raid_map { @@ -906,7 +919,6 @@ struct pqi_scsi_dev { u8 scsi3addr[8]; __be64 wwid; u8 volume_id[16]; - u8 unique_id[16]; u8 is_physical_device : 1; u8 is_external_raid_device : 1; u8 is_expander_smp_device : 1; @@ -930,6 +942,9 @@ struct pqi_scsi_dev { u8 active_path_index; u8 path_map; u8 bay; + u8 box_index; + u8 phys_box_on_bus; + u8 phy_connected_dev_type; u8 box[8]; u16 phys_connector[8]; bool raid_bypass_configured; /* RAID bypass configured */ @@ -950,13 +965,9 @@ struct pqi_scsi_dev { }; /* VPD inquiry pages */ -#define SCSI_VPD_SUPPORTED_PAGES 0x0 /* standard page */ -#define SCSI_VPD_DEVICE_ID 0x83 /* standard page */ #define CISS_VPD_LV_DEVICE_GEOMETRY 0xc1 /* vendor-specific page */ #define CISS_VPD_LV_BYPASS_STATUS 0xc2 /* vendor-specific page */ #define CISS_VPD_LV_STATUS 0xc3 /* vendor-specific page */ -#define SCSI_VPD_HEADER_SZ 4 -#define SCSI_VPD_DEVICE_ID_IDX 8 /* Index of page id in page */ #define VPD_PAGE (1 << 8) @@ -1073,6 +1084,9 @@ struct pqi_ctrl_info { unsigned int ctrl_id; struct pci_dev *pci_dev; char firmware_version[11]; + char serial_number[17]; + char model[17]; + char vendor[9]; void __iomem *iomem_base; struct pqi_ctrl_registers __iomem *registers; struct pqi_device_registers __iomem *pqi_registers; @@ -1123,13 +1137,16 @@ struct pqi_ctrl_info { struct mutex ofa_mutex; /* serialize ofa */ bool controller_online; bool block_requests; - bool in_shutdown; + bool block_device_reset; bool in_ofa; + bool in_shutdown; u8 inbound_spanning_supported : 1; u8 outbound_spanning_supported : 1; u8 pqi_mode_enabled : 1; u8 pqi_reset_quiesce_supported : 1; u8 soft_reset_handshake_supported : 1; + u8 raid_iu_timeout_supported: 1; + u8 tmf_iu_timeout_supported: 1; struct list_head scsi_device_list; spinlock_t scsi_device_list_lock; @@ -1163,9 +1180,10 @@ struct pqi_ctrl_info { spinlock_t raid_bypass_retry_list_lock; struct work_struct raid_bypass_retry_work; - struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; - dma_addr_t pqi_ofa_mem_dma_handle; - void **pqi_ofa_chunk_virt_addr; + struct pqi_ofa_memory *pqi_ofa_mem_virt_addr; + dma_addr_t pqi_ofa_mem_dma_handle; + void **pqi_ofa_chunk_virt_addr; + atomic_t sync_cmds_outstanding; }; enum pqi_ctrl_mode { @@ -1184,10 +1202,6 @@ enum pqi_ctrl_mode { #define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */ #define CISS_GET_RAID_MAP 0xc8 -/* constants for CISS_REPORT_LOG/CISS_REPORT_PHYS commands */ -#define CISS_REPORT_LOG_EXTENDED 0x1 -#define CISS_REPORT_PHYS_EXTENDED 0x2 - /* BMIC commands */ #define BMIC_IDENTIFY_CONTROLLER 0x11 #define BMIC_IDENTIFY_PHYSICAL_DEVICE 0x15 @@ -1201,7 +1215,7 @@ enum pqi_ctrl_mode { #define BMIC_SET_DIAG_OPTIONS 0xf4 #define BMIC_SENSE_DIAG_OPTIONS 0xf5 -#define CSMI_CC_SAS_SMP_PASSTHRU 0X17 +#define CSMI_CC_SAS_SMP_PASSTHRU 0x17 #define SA_FLUSH_CACHE 0x1 @@ -1224,14 +1238,25 @@ struct bmic_identify_controller { __le16 extended_logical_unit_count; u8 reserved1[34]; __le16 firmware_build_number; - u8 reserved2[100]; + u8 reserved2[8]; + u8 vendor_id[8]; + u8 product_id[16]; + u8 reserved3[68]; u8 controller_mode; - u8 reserved3[32]; + u8 reserved4[32]; }; -#define SA_EXPANDER_SMP_DEVICE 0x05 -/*SCSI Invalid Device Type for SAS devices*/ -#define PQI_SAS_SCSI_INVALID_DEVTYPE 0xff +struct bmic_sense_subsystem_info { + u8 reserved[44]; + u8 ctrl_serial_number[16]; +}; + +/* constants for device_type field */ +#define SA_DEVICE_TYPE_SATA 0x1 +#define SA_DEVICE_TYPE_SAS 0x2 
+#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5 +#define SA_DEVICE_TYPE_CONTROLLER 0x7 +#define SA_DEVICE_TYPE_NVME 0x9 struct bmic_identify_physical_device { u8 scsi_bus; /* SCSI Bus number on controller */ @@ -1257,7 +1282,7 @@ struct bmic_identify_physical_device { __le32 rpm; /* drive rotational speed in RPM */ u8 device_type; /* type of drive */ u8 sata_version; /* only valid when device_type = */ - /* BMIC_DEVICE_TYPE_SATA */ + /* SA_DEVICE_TYPE_SATA */ __le64 big_total_block_count; __le64 ris_starting_lba; __le32 ris_size; @@ -1380,18 +1405,6 @@ struct bmic_diag_options { #pragma pack() -static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost) -{ - void *hostdata = shost_priv(shost); - - return *((struct pqi_ctrl_info **)hostdata); -} - -static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) -{ - return !ctrl_info->controller_online; -} - static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info) { atomic_inc(&ctrl_info->num_busy_threads); @@ -1402,9 +1415,11 @@ static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info) atomic_dec(&ctrl_info->num_busy_threads); } -static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) +static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost) { - return ctrl_info->block_requests; + void *hostdata = shost_priv(shost); + + return *((struct pqi_ctrl_info **)hostdata); } void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 8fd5ffc55792..b7492568e02f 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -33,11 +33,11 @@ #define BUILD_TIMESTAMP #endif -#define DRIVER_VERSION "1.2.6-015" +#define DRIVER_VERSION "1.2.10-025" #define DRIVER_MAJOR 1 #define DRIVER_MINOR 2 -#define DRIVER_RELEASE 6 -#define DRIVER_REVISION 15 +#define DRIVER_RELEASE 10 +#define DRIVER_REVISION 25 #define DRIVER_NAME "Microsemi PQI Driver (v" \ DRIVER_VERSION BUILD_TIMESTAMP ")" @@ -145,6 +145,18 @@ MODULE_PARM_DESC(lockup_action, "Action to take when controller locked up.\n" "\t\tSupported: none, reboot, panic\n" "\t\tDefault: none"); +static int pqi_expose_ld_first; +module_param_named(expose_ld_first, + pqi_expose_ld_first, int, 0644); +MODULE_PARM_DESC(expose_ld_first, + "Expose logical drives before physical drives."); + +static int pqi_hide_vsep; +module_param_named(hide_vsep, + pqi_hide_vsep, int, 0644); +MODULE_PARM_DESC(hide_vsep, + "Hide the virtual SEP for direct attached drives."); + static char *raid_levels[] = { "RAID-0", "RAID-4", @@ -199,6 +211,11 @@ static inline bool pqi_is_external_raid_addr(u8 *scsi3addr) return scsi3addr[2] != 0; } +static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info) +{ + return !ctrl_info->controller_online; +} + static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info) { if (ctrl_info->controller_online) @@ -223,6 +240,21 @@ static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info, sis_write_driver_scratch(ctrl_info, mode); } +static inline void pqi_ctrl_block_device_reset(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->block_device_reset = true; +} + +static inline bool pqi_device_reset_blocked(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->block_device_reset; +} + +static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->block_requests; +} + static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info) { 
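	/*
	 * (Editor's note: comment added to this excerpt, not part of the
	 * patch.)  This flag is what pqi_ctrl_blocked() above reports; once
	 * it is set, pqi_scsi_queue_command() fails new requests with
	 * SCSI_MLQUEUE_HOST_BUSY until requests are unblocked again.
	 */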
ctrl_info->block_requests = true; @@ -319,6 +351,16 @@ static inline bool pqi_device_in_remove(struct pqi_ctrl_info *ctrl_info, return device->in_remove && !ctrl_info->in_shutdown; } +static inline void pqi_ctrl_shutdown_start(struct pqi_ctrl_info *ctrl_info) +{ + ctrl_info->in_shutdown = true; +} + +static inline bool pqi_ctrl_in_shutdown(struct pqi_ctrl_info *ctrl_info) +{ + return ctrl_info->in_shutdown; +} + static inline void pqi_schedule_rescan_worker_with_delay( struct pqi_ctrl_info *ctrl_info, unsigned long delay) { @@ -348,6 +390,11 @@ static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info) cancel_delayed_work_sync(&ctrl_info->rescan_work); } +static inline void pqi_cancel_event_worker(struct pqi_ctrl_info *ctrl_info) +{ + cancel_work_sync(&ctrl_info->event_work); +} + static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info) { if (!ctrl_info->heartbeat_counter) @@ -365,7 +412,7 @@ static inline u8 pqi_read_soft_reset_status(struct pqi_ctrl_info *ctrl_info) } static inline void pqi_clear_soft_reset_status(struct pqi_ctrl_info *ctrl_info, - u8 clear) + u8 clear) { u8 status; @@ -450,9 +497,9 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, request->data_direction = SOP_READ_FLAG; cdb[0] = cmd; if (cmd == CISS_REPORT_PHYS) - cdb[1] = CISS_REPORT_PHYS_EXTENDED; + cdb[1] = CISS_REPORT_PHYS_FLAG_OTHER; else - cdb[1] = CISS_REPORT_LOG_EXTENDED; + cdb[1] = CISS_REPORT_LOG_FLAG_UNIQUE_LUN_ID; put_unaligned_be32(cdb_length, &cdb[6]); break; case CISS_GET_RAID_MAP: @@ -472,6 +519,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info, /* fall through */ case BMIC_IDENTIFY_CONTROLLER: case BMIC_IDENTIFY_PHYSICAL_DEVICE: + case BMIC_SENSE_SUBSYSTEM_INFORMATION: request->data_direction = SOP_READ_FLAG; cdb[0] = BMIC_READ; cdb[6] = cmd; @@ -554,13 +602,12 @@ static void pqi_free_io_request(struct pqi_io_request *io_request) } static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, - u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, - struct pqi_raid_error_info *error_info, - unsigned long timeout_msecs) + u8 *scsi3addr, void *buffer, size_t buffer_length, u16 vpd_page, + struct pqi_raid_error_info *error_info, unsigned long timeout_msecs) { int rc; - enum dma_data_direction dir; struct pqi_raid_path_request request; + enum dma_data_direction dir; rc = pqi_build_raid_path_request(ctrl_info, &request, cmd, scsi3addr, buffer, @@ -568,36 +615,44 @@ static int pqi_send_scsi_raid_request(struct pqi_ctrl_info *ctrl_info, u8 cmd, if (rc) return rc; - rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, - 0, error_info, timeout_msecs); + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0, + error_info, timeout_msecs); pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); + return rc; } -/* Helper functions for pqi_send_scsi_raid_request */ +/* helper functions for pqi_send_scsi_raid_request */ static inline int pqi_send_ctrl_raid_request(struct pqi_ctrl_info *ctrl_info, - u8 cmd, void *buffer, size_t buffer_length) + u8 cmd, void *buffer, size_t buffer_length) { return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, - buffer, buffer_length, 0, NULL, NO_TIMEOUT); + buffer, buffer_length, 0, NULL, NO_TIMEOUT); } static inline int pqi_send_ctrl_raid_with_error(struct pqi_ctrl_info *ctrl_info, - u8 cmd, void *buffer, size_t buffer_length, - struct pqi_raid_error_info *error_info) + u8 cmd, void *buffer, size_t 
buffer_length, + struct pqi_raid_error_info *error_info) { return pqi_send_scsi_raid_request(ctrl_info, cmd, RAID_CTLR_LUNID, - buffer, buffer_length, 0, error_info, NO_TIMEOUT); + buffer, buffer_length, 0, error_info, NO_TIMEOUT); } - static inline int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info, - struct bmic_identify_controller *buffer) + struct bmic_identify_controller *buffer) { return pqi_send_ctrl_raid_request(ctrl_info, BMIC_IDENTIFY_CONTROLLER, - buffer, sizeof(*buffer)); + buffer, sizeof(*buffer)); +} + +static inline int pqi_sense_subsystem_info(struct pqi_ctrl_info *ctrl_info, + struct bmic_sense_subsystem_info *sense_info) +{ + return pqi_send_ctrl_raid_request(ctrl_info, + BMIC_SENSE_SUBSYSTEM_INFORMATION, sense_info, + sizeof(*sense_info)); } static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, @@ -607,83 +662,9 @@ static inline int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info, buffer, buffer_length, vpd_page, NULL, NO_TIMEOUT); } -static bool pqi_vpd_page_supported(struct pqi_ctrl_info *ctrl_info, - u8 *scsi3addr, u16 vpd_page) -{ - int rc; - int i; - int pages; - unsigned char *buf, bufsize; - - buf = kzalloc(256, GFP_KERNEL); - if (!buf) - return false; - - /* Get the size of the page list first */ - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES, - buf, SCSI_VPD_HEADER_SZ); - if (rc != 0) - goto exit_unsupported; - - pages = buf[3]; - if ((pages + SCSI_VPD_HEADER_SZ) <= 255) - bufsize = pages + SCSI_VPD_HEADER_SZ; - else - bufsize = 255; - - /* Get the whole VPD page list */ - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_SUPPORTED_PAGES, - buf, bufsize); - if (rc != 0) - goto exit_unsupported; - - pages = buf[3]; - for (i = 1; i <= pages; i++) - if (buf[3 + i] == vpd_page) - goto exit_supported; - -exit_unsupported: - kfree(buf); - return false; - -exit_supported: - kfree(buf); - return true; -} - -static int pqi_get_device_id(struct pqi_ctrl_info *ctrl_info, - u8 *scsi3addr, u8 *device_id, int buflen) -{ - int rc; - unsigned char *buf; - - if (!pqi_vpd_page_supported(ctrl_info, scsi3addr, SCSI_VPD_DEVICE_ID)) - return 1; /* function not supported */ - - buf = kzalloc(64, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - rc = pqi_scsi_inquiry(ctrl_info, scsi3addr, - VPD_PAGE | SCSI_VPD_DEVICE_ID, - buf, 64); - if (rc == 0) { - if (buflen > 16) - buflen = 16; - memcpy(device_id, &buf[SCSI_VPD_DEVICE_ID_IDX], buflen); - } - - kfree(buf); - - return rc; -} - static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, - struct bmic_identify_physical_device *buffer, - size_t buffer_length) + struct bmic_identify_physical_device *buffer, size_t buffer_length) { int rc; enum dma_data_direction dir; @@ -704,6 +685,7 @@ static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info, 0, NULL, NO_TIMEOUT); pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1, dir); + return rc; } @@ -742,7 +724,7 @@ int pqi_csmi_smp_passthru(struct pqi_ctrl_info *ctrl_info, buffer, buffer_length, error_info); } -#define PQI_FETCH_PTRAID_DATA (1UL<<31) +#define PQI_FETCH_PTRAID_DATA (1 << 31) static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) { @@ -754,14 +736,15 @@ static int pqi_set_diag_rescan(struct pqi_ctrl_info *ctrl_info) return -ENOMEM; rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SENSE_DIAG_OPTIONS, - diag, sizeof(*diag)); + diag, sizeof(*diag)); if (rc) goto out; diag->options |= cpu_to_le32(PQI_FETCH_PTRAID_DATA); - rc = 
pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, - diag, sizeof(*diag)); + rc = pqi_send_ctrl_raid_request(ctrl_info, BMIC_SET_DIAG_OPTIONS, diag, + sizeof(*diag)); + out: kfree(diag); @@ -772,7 +755,7 @@ static inline int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info, void *buffer, size_t buffer_length) { return pqi_send_ctrl_raid_request(ctrl_info, BMIC_WRITE_HOST_WELLNESS, - buffer, buffer_length); + buffer, buffer_length); } #pragma pack(1) @@ -925,7 +908,7 @@ static inline int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, void *buffer, size_t buffer_length) { return pqi_send_ctrl_raid_request(ctrl_info, cmd, buffer, - buffer_length); + buffer_length); } static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd, @@ -1259,9 +1242,9 @@ static void pqi_get_raid_bypass_status(struct pqi_ctrl_info *ctrl_info, if (rc) goto out; -#define RAID_BYPASS_STATUS 4 -#define RAID_BYPASS_CONFIGURED 0x1 -#define RAID_BYPASS_ENABLED 0x2 +#define RAID_BYPASS_STATUS 4 +#define RAID_BYPASS_CONFIGURED 0x1 +#define RAID_BYPASS_ENABLED 0x2 bypass_status = buffer[RAID_BYPASS_STATUS]; device->raid_bypass_configured = @@ -1364,14 +1347,6 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info, } } - if (pqi_get_device_id(ctrl_info, device->scsi3addr, - device->unique_id, sizeof(device->unique_id)) < 0) - dev_warn(&ctrl_info->pci_dev->dev, - "Can't get device id for scsi %d:%d:%d:%d\n", - ctrl_info->scsi_host->host_no, - device->bus, device->target, - device->lun); - out: kfree(buffer); @@ -1393,6 +1368,9 @@ static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info, return; } + device->box_index = id_phys->box_index; + device->phys_box_on_bus = id_phys->phys_box_on_bus; + device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0]; device->queue_depth = get_unaligned_le16(&id_phys->current_queue_depth_limit); device->device_type = id_phys->device_type; @@ -1719,6 +1697,10 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device, existing_device->active_path_index = new_device->active_path_index; existing_device->path_map = new_device->path_map; existing_device->bay = new_device->bay; + existing_device->box_index = new_device->box_index; + existing_device->phys_box_on_bus = new_device->phys_box_on_bus; + existing_device->phy_connected_dev_type = + new_device->phy_connected_dev_type; memcpy(existing_device->box, new_device->box, sizeof(existing_device->box)); memcpy(existing_device->phys_connector, new_device->phys_connector, @@ -1801,7 +1783,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info, device = new_device_list[i]; find_result = pqi_scsi_find_entry(ctrl_info, device, - &matching_device); + &matching_device); switch (find_result) { case DEVICE_SAME: @@ -1945,6 +1927,11 @@ static inline bool pqi_skip_device(u8 *scsi3addr) return false; } +static inline void pqi_mask_device(u8 *scsi3addr) +{ + scsi3addr[3] |= 0xc0; +} + static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device) { if (!device->is_physical_device) @@ -1988,6 +1975,8 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) unsigned int num_valid_devices; bool is_physical_device; u8 *scsi3addr; + unsigned int physical_index; + unsigned int logical_index; static char *out_of_memory_msg = "failed to allocate memory, device discovery stopped"; @@ -2023,6 +2012,20 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) rc = -ENOMEM; goto out; } + + if (pqi_hide_vsep) { + for (i = 
num_physicals - 1; i >= 0; i--) { + phys_lun_ext_entry = + &physdev_list->lun_entries[i]; + if (CISS_GET_DRIVE_NUMBER( + phys_lun_ext_entry->lunid) == + PQI_VSEP_CISS_BTL) { + pqi_mask_device( + phys_lun_ext_entry->lunid); + break; + } + } + } } num_new_devices = num_physicals + num_logicals; @@ -2050,19 +2053,23 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) device = NULL; num_valid_devices = 0; + physical_index = 0; + logical_index = 0; for (i = 0; i < num_new_devices; i++) { - if (i < num_physicals) { + if ((!pqi_expose_ld_first && i < num_physicals) || + (pqi_expose_ld_first && i >= num_logicals)) { is_physical_device = true; - phys_lun_ext_entry = &physdev_list->lun_entries[i]; + phys_lun_ext_entry = + &physdev_list->lun_entries[physical_index++]; log_lun_ext_entry = NULL; scsi3addr = phys_lun_ext_entry->lunid; } else { is_physical_device = false; phys_lun_ext_entry = NULL; log_lun_ext_entry = - &logdev_list->lun_entries[i - num_physicals]; + &logdev_list->lun_entries[logical_index++]; scsi3addr = log_lun_ext_entry->lunid; } @@ -2079,7 +2086,7 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) device->is_physical_device = is_physical_device; if (is_physical_device) { if (phys_lun_ext_entry->device_type == - SA_EXPANDER_SMP_DEVICE) + SA_DEVICE_TYPE_EXPANDER_SMP) device->is_expander_smp_device = true; } else { device->is_external_raid_device = @@ -2116,17 +2123,13 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info) if (device->is_physical_device) { device->wwid = phys_lun_ext_entry->wwid; if ((phys_lun_ext_entry->device_flags & - REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) && + CISS_REPORT_PHYS_DEV_FLAG_AIO_ENABLED) && phys_lun_ext_entry->aio_handle) { device->aio_enabled = true; - device->aio_handle = - phys_lun_ext_entry->aio_handle; - } - if (device->devtype == TYPE_DISK || - device->devtype == TYPE_ZBC) { - pqi_get_physical_disk_info(ctrl_info, - device, id_phys); + device->aio_handle = + phys_lun_ext_entry->aio_handle; } + pqi_get_physical_disk_info(ctrl_info, device, id_phys); } else { memcpy(device->volume_id, log_lun_ext_entry->volume_id, sizeof(device->volume_id)); @@ -2184,18 +2187,20 @@ static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info) static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info) { - int rc; + int rc = 0; if (pqi_ctrl_offline(ctrl_info)) return -ENXIO; - mutex_lock(&ctrl_info->scan_mutex); - - rc = pqi_update_scsi_devices(ctrl_info); - if (rc) + if (!mutex_trylock(&ctrl_info->scan_mutex)) { pqi_schedule_rescan_worker_delayed(ctrl_info); - - mutex_unlock(&ctrl_info->scan_mutex); + rc = -EINPROGRESS; + } else { + rc = pqi_update_scsi_devices(ctrl_info); + if (rc) + pqi_schedule_rescan_worker_delayed(ctrl_info); + mutex_unlock(&ctrl_info->scan_mutex); + } return rc; } @@ -3104,7 +3109,7 @@ static enum pqi_soft_reset_status pqi_poll_for_soft_reset_status( } static void pqi_process_soft_reset(struct pqi_ctrl_info *ctrl_info, - enum pqi_soft_reset_status reset_status) + enum pqi_soft_reset_status reset_status) { int rc; @@ -3148,8 +3153,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, if (event_id == PQI_EVENT_OFA_QUIESCE) { dev_info(&ctrl_info->pci_dev->dev, - "Received Online Firmware Activation quiesce event for controller %u\n", - ctrl_info->ctrl_id); + "Received Online Firmware Activation quiesce event for controller %u\n", + ctrl_info->ctrl_id); pqi_ofa_ctrl_quiesce(ctrl_info); pqi_acknowledge_event(ctrl_info, event); if 
(ctrl_info->soft_reset_handshake_supported) { @@ -3169,8 +3174,8 @@ static void pqi_ofa_process_event(struct pqi_ctrl_info *ctrl_info, pqi_ofa_free_host_buffer(ctrl_info); pqi_acknowledge_event(ctrl_info, event); dev_info(&ctrl_info->pci_dev->dev, - "Online Firmware Activation(%u) cancel reason : %u\n", - ctrl_info->ctrl_id, event->ofa_cancel_reason); + "Online Firmware Activation(%u) cancel reason : %u\n", + ctrl_info->ctrl_id, event->ofa_cancel_reason); } mutex_unlock(&ctrl_info->ofa_mutex); @@ -3349,7 +3354,7 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info) #define PQI_LEGACY_INTX_MASK 0x1 static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info, - bool enable_intx) + bool enable_intx) { u32 intx_mask; struct pqi_device_registers __iomem *pqi_registers; @@ -3787,7 +3792,7 @@ static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info) &pqi_registers->admin_oq_pi_addr); reg = PQI_ADMIN_IQ_NUM_ELEMENTS | - (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 | + (PQI_ADMIN_OQ_NUM_ELEMENTS << 8) | (admin_queues->int_msg_num << 16); writel(reg, &pqi_registers->admin_iq_num_elements); writel(PQI_CREATE_ADMIN_QUEUE_PAIR, @@ -3994,8 +3999,8 @@ static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request, complete(waiting); } -static int pqi_process_raid_io_error_synchronous(struct pqi_raid_error_info - *error_info) +static int pqi_process_raid_io_error_synchronous( + struct pqi_raid_error_info *error_info) { int rc = -EIO; @@ -4068,6 +4073,8 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, goto out; } + atomic_inc(&ctrl_info->sync_cmds_outstanding); + io_request = pqi_alloc_io_request(ctrl_info); put_unaligned_le16(io_request->index, @@ -4114,6 +4121,7 @@ static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info, pqi_free_io_request(io_request); + atomic_dec(&ctrl_info->sync_cmds_outstanding); out: up(&ctrl_info->sync_request_sem); @@ -4611,11 +4619,11 @@ static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info) static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info) { - ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, - ctrl_info->error_buffer_length, - &ctrl_info->error_buffer_dma_handle, - GFP_KERNEL); + ctrl_info->error_buffer = dma_alloc_coherent(&ctrl_info->pci_dev->dev, + ctrl_info->error_buffer_length, + &ctrl_info->error_buffer_dma_handle, + GFP_KERNEL); if (!ctrl_info->error_buffer) return -ENOMEM; @@ -5348,7 +5356,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, pqi_ctrl_busy(ctrl_info); if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device) || - pqi_ctrl_in_ofa(ctrl_info)) { + pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) { rc = SCSI_MLQUEUE_HOST_BUSY; goto out; } @@ -5365,7 +5373,7 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost, if (pqi_is_logical_device(device)) { raid_bypassed = false; if (device->raid_bypass_enabled && - !blk_rq_is_passthrough(scmd->request)) { + !blk_rq_is_passthrough(scmd->request)) { rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device, scmd, queue_group); if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) @@ -5596,6 +5604,18 @@ static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info, return 0; } +static int pqi_ctrl_wait_for_pending_sync_cmds(struct pqi_ctrl_info *ctrl_info) +{ + while (atomic_read(&ctrl_info->sync_cmds_outstanding)) { + pqi_check_ctrl_health(ctrl_info); + if (pqi_ctrl_offline(ctrl_info)) + return -ENXIO; + 
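		/*
		 * (Editorial comment, not in the patch.)  Poll roughly once
		 * per millisecond while synchronous commands drain; the
		 * offline check above aborts the wait with -ENXIO if the
		 * controller dies in the meantime.
		 */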
usleep_range(1000, 2000); + } + + return 0; +} + static void pqi_lun_reset_complete(struct pqi_io_request *io_request, void *context) { @@ -5604,7 +5624,8 @@ static void pqi_lun_reset_complete(struct pqi_io_request *io_request, complete(waiting); } -#define PQI_LUN_RESET_TIMEOUT_SECS 10 +#define PQI_LUN_RESET_TIMEOUT_SECS 30 +#define PQI_LUN_RESET_POLL_COMPLETION_SECS 10 static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *device, struct completion *wait) @@ -5613,7 +5634,7 @@ static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info, while (1) { if (wait_for_completion_io_timeout(wait, - PQI_LUN_RESET_TIMEOUT_SECS * PQI_HZ)) { + PQI_LUN_RESET_POLL_COMPLETION_SECS * PQI_HZ)) { rc = 0; break; } @@ -5650,6 +5671,9 @@ static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info, memcpy(request->lun_number, device->scsi3addr, sizeof(request->lun_number)); request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET; + if (ctrl_info->tmf_iu_timeout_supported) + put_unaligned_le16(PQI_LUN_RESET_TIMEOUT_SECS, + &request->timeout); pqi_start_io(ctrl_info, &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH, @@ -5679,7 +5703,7 @@ static int _pqi_device_reset(struct pqi_ctrl_info *ctrl_info, for (retries = 0;;) { rc = pqi_lun_reset(ctrl_info, device); - if (rc != -EAGAIN || ++retries > PQI_LUN_RESET_RETRIES) + if (rc == 0 || ++retries > PQI_LUN_RESET_RETRIES) break; msleep(PQI_LUN_RESET_RETRY_INTERVAL_MSECS); } @@ -5733,17 +5757,17 @@ static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd) shost->host_no, device->bus, device->target, device->lun); pqi_check_ctrl_health(ctrl_info); - if (pqi_ctrl_offline(ctrl_info)) { - dev_err(&ctrl_info->pci_dev->dev, - "controller %u offlined - cannot send device reset\n", - ctrl_info->ctrl_id); + if (pqi_ctrl_offline(ctrl_info) || + pqi_device_reset_blocked(ctrl_info)) { rc = FAILED; goto out; } pqi_wait_until_ofa_finished(ctrl_info); + atomic_inc(&ctrl_info->sync_cmds_outstanding); rc = pqi_device_reset(ctrl_info, device); + atomic_dec(&ctrl_info->sync_cmds_outstanding); out: dev_err(&ctrl_info->pci_dev->dev, @@ -6012,6 +6036,9 @@ static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg) put_unaligned_le16(iu_length, &request.header.iu_length); + if (ctrl_info->raid_iu_timeout_supported) + put_unaligned_le32(iocommand.Request.Timeout, &request.timeout); + rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT); @@ -6065,7 +6092,7 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, ctrl_info = shost_to_hba(sdev->host); - if (pqi_ctrl_in_ofa(ctrl_info)) + if (pqi_ctrl_in_ofa(ctrl_info) || pqi_ctrl_in_shutdown(ctrl_info)) return -EBUSY; switch (cmd) { @@ -6091,23 +6118,59 @@ static int pqi_ioctl(struct scsi_device *sdev, unsigned int cmd, return rc; } -static ssize_t pqi_version_show(struct device *dev, +static ssize_t pqi_firmware_version_show(struct device *dev, struct device_attribute *attr, char *buffer) { - ssize_t count = 0; struct Scsi_Host *shost; struct pqi_ctrl_info *ctrl_info; shost = class_to_shost(dev); ctrl_info = shost_to_hba(shost); - count += snprintf(buffer + count, PAGE_SIZE - count, - " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP); + return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->firmware_version); +} - count += snprintf(buffer + count, PAGE_SIZE - count, - "firmware: %s\n", ctrl_info->firmware_version); +static ssize_t 
pqi_driver_version_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + return snprintf(buffer, PAGE_SIZE, "%s\n", + DRIVER_VERSION BUILD_TIMESTAMP); +} - return count; +static ssize_t pqi_serial_number_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + + shost = class_to_shost(dev); + ctrl_info = shost_to_hba(shost); + + return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->serial_number); +} + +static ssize_t pqi_model_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + + shost = class_to_shost(dev); + ctrl_info = shost_to_hba(shost); + + return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->model); +} + +static ssize_t pqi_vendor_show(struct device *dev, + struct device_attribute *attr, char *buffer) +{ + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + + shost = class_to_shost(dev); + ctrl_info = shost_to_hba(shost); + + return snprintf(buffer, PAGE_SIZE, "%s\n", ctrl_info->vendor); } static ssize_t pqi_host_rescan_store(struct device *dev, @@ -6160,13 +6223,21 @@ static ssize_t pqi_lockup_action_store(struct device *dev, return -EINVAL; } -static DEVICE_ATTR(version, 0444, pqi_version_show, NULL); +static DEVICE_ATTR(driver_version, 0444, pqi_driver_version_show, NULL); +static DEVICE_ATTR(firmware_version, 0444, pqi_firmware_version_show, NULL); +static DEVICE_ATTR(model, 0444, pqi_model_show, NULL); +static DEVICE_ATTR(serial_number, 0444, pqi_serial_number_show, NULL); +static DEVICE_ATTR(vendor, 0444, pqi_vendor_show, NULL); static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store); static DEVICE_ATTR(lockup_action, 0644, pqi_lockup_action_show, pqi_lockup_action_store); static struct device_attribute *pqi_shost_attrs[] = { - &dev_attr_version, + &dev_attr_driver_version, + &dev_attr_firmware_version, + &dev_attr_model, + &dev_attr_serial_number, + &dev_attr_vendor, &dev_attr_rescan, &dev_attr_lockup_action, NULL @@ -6179,7 +6250,7 @@ static ssize_t pqi_unique_id_show(struct device *dev, struct scsi_device *sdev; struct pqi_scsi_dev *device; unsigned long flags; - unsigned char uid[16]; + u8 unique_id[16]; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -6192,16 +6263,22 @@ static ssize_t pqi_unique_id_show(struct device *dev, flags); return -ENODEV; } - memcpy(uid, device->unique_id, sizeof(uid)); + + if (device->is_physical_device) { + memset(unique_id, 0, 8); + memcpy(unique_id + 8, &device->wwid, sizeof(device->wwid)); + } else { + memcpy(unique_id, device->volume_id, sizeof(device->volume_id)); + } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); return snprintf(buffer, PAGE_SIZE, "%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X%02X\n", - uid[0], uid[1], uid[2], uid[3], - uid[4], uid[5], uid[6], uid[7], - uid[8], uid[9], uid[10], uid[11], - uid[12], uid[13], uid[14], uid[15]); + unique_id[0], unique_id[1], unique_id[2], unique_id[3], + unique_id[4], unique_id[5], unique_id[6], unique_id[7], + unique_id[8], unique_id[9], unique_id[10], unique_id[11], + unique_id[12], unique_id[13], unique_id[14], unique_id[15]); } static ssize_t pqi_lunid_show(struct device *dev, @@ -6224,6 +6301,7 @@ static ssize_t pqi_lunid_show(struct device *dev, flags); return -ENODEV; } + memcpy(lunid, device->scsi3addr, sizeof(lunid)); spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); @@ -6231,7 +6309,8 @@ static ssize_t 
pqi_lunid_show(struct device *dev, return snprintf(buffer, PAGE_SIZE, "0x%8phN\n", lunid); } -#define MAX_PATHS 8 +#define MAX_PATHS 8 + static ssize_t pqi_path_info_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -6243,9 +6322,9 @@ static ssize_t pqi_path_info_show(struct device *dev, int output_len = 0; u8 box; u8 bay; - u8 path_map_index = 0; + u8 path_map_index; char *active; - unsigned char phys_connector[2]; + u8 phys_connector[2]; sdev = to_scsi_device(dev); ctrl_info = shost_to_hba(sdev->host); @@ -6261,7 +6340,7 @@ static ssize_t pqi_path_info_show(struct device *dev, bay = device->bay; for (i = 0; i < MAX_PATHS; i++) { - path_map_index = 1<<i; + path_map_index = 1 << i; if (i == device->active_path_index) active = "Active"; else if (device->path_map & path_map_index) @@ -6312,10 +6391,10 @@ end_buffer: } spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + return output_len; } - static ssize_t pqi_sas_address_show(struct device *dev, struct device_attribute *attr, char *buffer) { @@ -6336,6 +6415,7 @@ static ssize_t pqi_sas_address_show(struct device *dev, flags); return -ENODEV; } + sas_address = device->sas_address; spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); @@ -6558,7 +6638,30 @@ static int pqi_reset(struct pqi_ctrl_info *ctrl_info) return rc; } -static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) +static int pqi_get_ctrl_serial_number(struct pqi_ctrl_info *ctrl_info) +{ + int rc; + struct bmic_sense_subsystem_info *sense_info; + + sense_info = kzalloc(sizeof(*sense_info), GFP_KERNEL); + if (!sense_info) + return -ENOMEM; + + rc = pqi_sense_subsystem_info(ctrl_info, sense_info); + if (rc) + goto out; + + memcpy(ctrl_info->serial_number, sense_info->ctrl_serial_number, + sizeof(sense_info->ctrl_serial_number)); + ctrl_info->serial_number[sizeof(sense_info->ctrl_serial_number)] = '\0'; + +out: + kfree(sense_info); + + return rc; +} + +static int pqi_get_ctrl_product_details(struct pqi_ctrl_info *ctrl_info) { int rc; struct bmic_identify_controller *identify; @@ -6579,6 +6682,14 @@ static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info) sizeof(ctrl_info->firmware_version), "-%u", get_unaligned_le16(&identify->firmware_build_number)); + memcpy(ctrl_info->model, identify->product_id, + sizeof(identify->product_id)); + ctrl_info->model[sizeof(identify->product_id)] = '\0'; + + memcpy(ctrl_info->vendor, identify->vendor_id, + sizeof(identify->vendor_id)); + ctrl_info->vendor[sizeof(identify->vendor_id)] = '\0'; + out: kfree(identify); @@ -6709,6 +6820,27 @@ static void pqi_firmware_feature_status(struct pqi_ctrl_info *ctrl_info, firmware_feature->feature_name); } +static void pqi_ctrl_update_feature_flags(struct pqi_ctrl_info *ctrl_info, + struct pqi_firmware_feature *firmware_feature) +{ + switch (firmware_feature->feature_bit) { + case PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE: + ctrl_info->soft_reset_handshake_supported = + firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT: + ctrl_info->raid_iu_timeout_supported = + firmware_feature->enabled; + break; + case PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT: + ctrl_info->tmf_iu_timeout_supported = + firmware_feature->enabled; + break; + } + + pqi_firmware_feature_status(ctrl_info, firmware_feature); +} + static inline void pqi_firmware_feature_update(struct pqi_ctrl_info *ctrl_info, struct pqi_firmware_feature *firmware_feature) { @@ -6732,7 +6864,17 @@ static struct pqi_firmware_feature pqi_firmware_features[] = { 
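	/*
	 * (Editorial comment, not in the patch.)  Each entry below ties a
	 * PQI_FIRMWARE_FEATURE_* bit to pqi_ctrl_update_feature_flags(),
	 * which mirrors the negotiated state into the matching
	 * ctrl_info->*_supported flag defined earlier in this series of
	 * hunks.
	 */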
{ .feature_name = "New Soft Reset Handshake", .feature_bit = PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE, - .feature_status = pqi_firmware_feature_status, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "RAID IU Timeout", + .feature_bit = PQI_FIRMWARE_FEATURE_RAID_IU_TIMEOUT, + .feature_status = pqi_ctrl_update_feature_flags, + }, + { + .feature_name = "TMF IU Timeout", + .feature_bit = PQI_FIRMWARE_FEATURE_TMF_IU_TIMEOUT, + .feature_status = pqi_ctrl_update_feature_flags, }, }; @@ -6786,7 +6928,6 @@ static void pqi_process_firmware_features( return; } - ctrl_info->soft_reset_handshake_supported = false; for (i = 0; i < ARRAY_SIZE(pqi_firmware_features); i++) { if (!pqi_firmware_features[i].supported) continue; @@ -6794,10 +6935,6 @@ static void pqi_process_firmware_features( firmware_features_iomem_addr, pqi_firmware_features[i].feature_bit)) { pqi_firmware_features[i].enabled = true; - if (pqi_firmware_features[i].feature_bit == - PQI_FIRMWARE_FEATURE_SOFT_RESET_HANDSHAKE) - ctrl_info->soft_reset_handshake_supported = - true; } pqi_firmware_feature_update(ctrl_info, &pqi_firmware_features[i]); @@ -6939,13 +7076,20 @@ static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info) return pqi_revert_to_sis_mode(ctrl_info); } +#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 + static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) { int rc; - rc = pqi_force_sis_mode(ctrl_info); - if (rc) - return rc; + if (reset_devices) { + sis_soft_reset(ctrl_info); + msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); + } else { + rc = pqi_force_sis_mode(ctrl_info); + if (rc) + return rc; + } /* * Wait until the controller is ready to start accepting SIS @@ -7098,10 +7242,17 @@ static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info) if (rc) return rc; - rc = pqi_get_ctrl_firmware_version(ctrl_info); + rc = pqi_get_ctrl_product_details(ctrl_info); + if (rc) { + dev_err(&ctrl_info->pci_dev->dev, + "error obtaining product details\n"); + return rc; + } + + rc = pqi_get_ctrl_serial_number(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, - "error obtaining firmware version\n"); + "error obtaining ctrl serial number\n"); return rc; } @@ -7241,10 +7392,10 @@ static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info) return rc; } - rc = pqi_get_ctrl_firmware_version(ctrl_info); + rc = pqi_get_ctrl_product_details(ctrl_info); if (rc) { dev_err(&ctrl_info->pci_dev->dev, - "error obtaining firmware version\n"); + "error obtaining product details\n"); return rc; } @@ -7306,7 +7457,7 @@ static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info) goto disable_device; } - ctrl_info->iomem_base = ioremap_nocache(pci_resource_start( + ctrl_info->iomem_base = ioremap(pci_resource_start( ctrl_info->pci_dev, 0), sizeof(struct pqi_ctrl_registers)); if (!ctrl_info->iomem_base) { @@ -7372,6 +7523,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) INIT_WORK(&ctrl_info->event_work, pqi_event_worker); atomic_set(&ctrl_info->num_interrupts, 0); + atomic_set(&ctrl_info->sync_cmds_outstanding, 0); INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); @@ -7579,6 +7731,8 @@ static void pqi_ofa_setup_host_buffer(struct pqi_ctrl_info *ctrl_info, dev_err(dev, "Failed to allocate host buffer of size = %u", bytes_requested); } + + return; } static void pqi_ofa_free_host_buffer(struct pqi_ctrl_info *ctrl_info) @@ -7645,8 +7799,6 @@ static int pqi_ofa_host_memory_update(struct pqi_ctrl_info *ctrl_info) 0, NULL, 
NO_TIMEOUT); } -#define PQI_POST_RESET_DELAY_B4_MSGU_READY 5000 - static int pqi_ofa_ctrl_restart(struct pqi_ctrl_info *ctrl_info) { msleep(PQI_POST_RESET_DELAY_B4_MSGU_READY); @@ -7814,28 +7966,73 @@ static void pqi_pci_remove(struct pci_dev *pci_dev) pqi_remove_ctrl(ctrl_info); } +static void pqi_crash_if_pending_command(struct pqi_ctrl_info *ctrl_info) +{ + unsigned int i; + struct pqi_io_request *io_request; + struct scsi_cmnd *scmd; + + for (i = 0; i < ctrl_info->max_io_slots; i++) { + io_request = &ctrl_info->io_request_pool[i]; + if (atomic_read(&io_request->refcount) == 0) + continue; + scmd = io_request->scmd; + WARN_ON(scmd != NULL); /* IO command from SML */ + WARN_ON(scmd == NULL); /* Non-IO cmd or driver initiated*/ + } +} + static void pqi_shutdown(struct pci_dev *pci_dev) { int rc; struct pqi_ctrl_info *ctrl_info; ctrl_info = pci_get_drvdata(pci_dev); - if (!ctrl_info) - goto error; + if (!ctrl_info) { + dev_err(&pci_dev->dev, + "cache could not be flushed\n"); + return; + } + + pqi_disable_events(ctrl_info); + pqi_wait_until_ofa_finished(ctrl_info); + pqi_cancel_update_time_worker(ctrl_info); + pqi_cancel_rescan_worker(ctrl_info); + pqi_cancel_event_worker(ctrl_info); + + pqi_ctrl_shutdown_start(ctrl_info); + pqi_ctrl_wait_until_quiesced(ctrl_info); + + rc = pqi_ctrl_wait_for_pending_io(ctrl_info, NO_TIMEOUT); + if (rc) { + dev_err(&pci_dev->dev, + "wait for pending I/O failed\n"); + return; + } + + pqi_ctrl_block_device_reset(ctrl_info); + pqi_wait_until_lun_reset_finished(ctrl_info); /* * Write all data in the controller's battery-backed cache to * storage. */ rc = pqi_flush_cache(ctrl_info, SHUTDOWN); - pqi_free_interrupts(ctrl_info); - pqi_reset(ctrl_info); - if (rc == 0) + if (rc) + dev_err(&pci_dev->dev, + "unable to flush controller cache\n"); + + pqi_ctrl_block_requests(ctrl_info); + + rc = pqi_ctrl_wait_for_pending_sync_cmds(ctrl_info); + if (rc) { + dev_err(&pci_dev->dev, + "wait for pending sync cmds failed\n"); return; + } -error: - dev_warn(&pci_dev->dev, - "unable to flush controller cache\n"); + pqi_crash_if_pending_command(ctrl_info); + pqi_reset(ctrl_info); } static void pqi_process_lockup_action_param(void) @@ -8024,6 +8221,10 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1bd4, 0x004f) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, 0x19e5, 0xd227) }, { @@ -8088,6 +8289,14 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0808) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_ADAPTEC2, 0x0809) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_VENDOR_ID_ADAPTEC2, 0x0900) }, { @@ -8244,6 +8453,26 @@ static const struct pci_device_id pqi_pci_id_table[] = { }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1d8d, 0x0800) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1d8d, 0x0908) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1d8d, 0x0806) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + 0x1d8d, 0x0916) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, + PCI_VENDOR_ID_GIGABYTE, 0x1000) + }, + { + PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f, PCI_ANY_ID, PCI_ANY_ID) }, { 0 } @@ -8460,11 +8689,11 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_general_admin_request, data.delete_operational_queue.queue_id) != 12); BUILD_BUG_ON(sizeof(struct 
pqi_general_admin_request) != 64); - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, data.create_operational_iq) != 64 - 11); - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, data.create_operational_oq) != 64 - 11); - BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request, + BUILD_BUG_ON(sizeof_field(struct pqi_general_admin_request, data.delete_operational_queue) != 64 - 11); BUILD_BUG_ON(offsetof(struct pqi_general_admin_response, @@ -8512,6 +8741,8 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, cdb) != 32); BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, + timeout) != 60); + BUILD_BUG_ON(offsetof(struct pqi_raid_path_request, sg_descriptors) != 64); BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) != PQI_OPERATIONAL_IQ_ELEMENT_LENGTH); @@ -8666,6 +8897,8 @@ static void __attribute__((unused)) verify_structures(void) BUILD_BUG_ON(offsetof(struct pqi_task_management_request, nexus_id) != 10); BUILD_BUG_ON(offsetof(struct pqi_task_management_request, + timeout) != 14); + BUILD_BUG_ON(offsetof(struct pqi_task_management_request, lun_number) != 16); BUILD_BUG_ON(offsetof(struct pqi_task_management_request, protocol_specific) != 24); diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c index 5cca1b9ef1f1..b7289112455c 100644 --- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c +++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c @@ -45,9 +45,9 @@ static void pqi_free_sas_phy(struct pqi_sas_phy *pqi_sas_phy) struct sas_phy *phy = pqi_sas_phy->phy; sas_port_delete_phy(pqi_sas_phy->parent_port->port, phy); - sas_phy_free(phy); if (pqi_sas_phy->added_to_port) list_del(&pqi_sas_phy->phy_list_entry); + sas_phy_delete(phy); kfree(pqi_sas_phy); } @@ -312,12 +312,107 @@ static int pqi_sas_get_linkerrors(struct sas_phy *phy) static int pqi_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier) { - return 0; + int rc; + unsigned long flags; + struct Scsi_Host *shost; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *found_device; + struct pqi_scsi_dev *device; + + if (!rphy) + return -ENODEV; + + shost = rphy_to_shost(rphy); + ctrl_info = shost_to_hba(shost); + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + found_device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); + + if (!found_device) { + rc = -ENODEV; + goto out; + } + + if (found_device->devtype == TYPE_ENCLOSURE) { + *identifier = get_unaligned_be64(&found_device->wwid); + rc = 0; + goto out; + } + + if (found_device->box_index == 0xff || + found_device->phys_box_on_bus == 0 || + found_device->bay == 0xff) { + rc = -EINVAL; + goto out; + } + + list_for_each_entry(device, &ctrl_info->scsi_device_list, + scsi_device_list_entry) { + if (device->devtype == TYPE_ENCLOSURE && + device->box_index == found_device->box_index && + device->phys_box_on_bus == + found_device->phys_box_on_bus && + memcmp(device->phys_connector, + found_device->phys_connector, 2) == 0) { + *identifier = + get_unaligned_be64(&device->wwid); + rc = 0; + goto out; + } + } + + if (found_device->phy_connected_dev_type != SA_DEVICE_TYPE_CONTROLLER) { + rc = -EINVAL; + goto out; + } + + list_for_each_entry(device, &ctrl_info->scsi_device_list, + scsi_device_list_entry) { + if (device->devtype == TYPE_ENCLOSURE && + CISS_GET_DRIVE_NUMBER(device->scsi3addr) == + 
PQI_VSEP_CISS_BTL) { + *identifier = get_unaligned_be64(&device->wwid); + rc = 0; + goto out; + } + } + + rc = -EINVAL; +out: + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return rc; } static int pqi_sas_get_bay_identifier(struct sas_rphy *rphy) { - return -ENXIO; + int rc; + unsigned long flags; + struct pqi_ctrl_info *ctrl_info; + struct pqi_scsi_dev *device; + struct Scsi_Host *shost; + + if (!rphy) + return -ENODEV; + + shost = rphy_to_shost(rphy); + ctrl_info = shost_to_hba(shost); + spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags); + device = pqi_find_device_by_sas_rphy(ctrl_info, rphy); + + if (!device) { + rc = -ENODEV; + goto out; + } + + if (device->bay == 0xff) + rc = -EINVAL; + else + rc = device->bay; + +out: + spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags); + + return rc; } static int pqi_sas_phy_reset(struct sas_phy *phy, int hard_reset) @@ -384,7 +479,6 @@ pqi_build_csmi_smp_passthru_buffer(struct sas_rphy *rphy, req_size -= SMP_CRC_FIELD_LENGTH; put_unaligned_le32(req_size, ¶meters->request_length); - put_unaligned_le32(resp_size, ¶meters->response_length); sg_copy_to_buffer(job->request_payload.sg_list, @@ -414,12 +508,12 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, struct sas_rphy *rphy) { int rc; - struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost); + struct pqi_ctrl_info *ctrl_info; struct bmic_csmi_smp_passthru_buffer *smp_buf; struct pqi_raid_error_info error_info; unsigned int reslen = 0; - pqi_ctrl_busy(ctrl_info); + ctrl_info = shost_to_hba(shost); if (job->reply_payload.payload_len == 0) { rc = -ENOMEM; @@ -441,16 +535,6 @@ void pqi_sas_smp_handler(struct bsg_job *job, struct Scsi_Host *shost, goto out; } - if (pqi_ctrl_offline(ctrl_info)) { - rc = -ENXIO; - goto out; - } - - if (pqi_ctrl_blocked(ctrl_info)) { - rc = -EBUSY; - goto out; - } - smp_buf = pqi_build_csmi_smp_passthru_buffer(rphy, job); if (!smp_buf) { rc = -ENOMEM; diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c index aef4881d8e21..f8397978f8ab 100644 --- a/drivers/scsi/sni_53c710.c +++ b/drivers/scsi/sni_53c710.c @@ -66,14 +66,12 @@ static int snirm710_probe(struct platform_device *dev) base = res->start; hostdata = kzalloc(sizeof(*hostdata), GFP_KERNEL); - if (!hostdata) { - dev_printk(KERN_ERR, dev, "Failed to allocate host data\n"); + if (!hostdata) return -ENOMEM; - } hostdata->dev = &dev->dev; dma_set_mask(&dev->dev, DMA_BIT_MASK(32)); - hostdata->base = ioremap_nocache(base, 0x100); + hostdata->base = ioremap(base, 0x100); hostdata->differential = 0; hostdata->clock = SNIRM710_CLOCK; diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c index 4664fdf75c0f..0fbb8fe6e521 100644 --- a/drivers/scsi/sr.c +++ b/drivers/scsi/sr.c @@ -38,6 +38,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bio.h> +#include <linux/compat.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/cdrom.h> @@ -598,6 +599,51 @@ out: return ret; } +#ifdef CONFIG_COMPAT +static int sr_block_compat_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, + unsigned long arg) +{ + struct scsi_cd *cd = scsi_cd(bdev->bd_disk); + struct scsi_device *sdev = cd->device; + void __user *argp = compat_ptr(arg); + int ret; + + mutex_lock(&sr_mutex); + + ret = scsi_ioctl_block_when_processing_errors(sdev, cmd, + (mode & FMODE_NDELAY) != 0); + if (ret) + goto out; + + scsi_autopm_get_device(sdev); + + /* + * Send SCSI addressing ioctls directly to mid level, send other + * ioctls to cdrom/block 
level. + */ + switch (cmd) { + case SCSI_IOCTL_GET_IDLUN: + case SCSI_IOCTL_GET_BUS_NUMBER: + ret = scsi_compat_ioctl(sdev, cmd, argp); + goto put; + } + + ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, (unsigned long)argp); + if (ret != -ENOSYS) + goto put; + + ret = scsi_compat_ioctl(sdev, cmd, argp); + +put: + scsi_autopm_put_device(sdev); + +out: + mutex_unlock(&sr_mutex); + return ret; + +} +#endif + static unsigned int sr_block_check_events(struct gendisk *disk, unsigned int clearing) { @@ -641,12 +687,11 @@ static const struct block_device_operations sr_bdops = { .open = sr_block_open, .release = sr_block_release, .ioctl = sr_block_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = sr_block_compat_ioctl, +#endif .check_events = sr_block_check_events, .revalidate_disk = sr_block_revalidate_disk, - /* - * No compat_ioctl for now because sr_block_ioctl never - * seems to pass arbitrary ioctls down to host drivers. - */ }; static int sr_open(struct cdrom_device_info *cdi, int purpose) diff --git a/drivers/scsi/sr_vendor.c b/drivers/scsi/sr_vendor.c index e3b0ce25162b..17a56c87d383 100644 --- a/drivers/scsi/sr_vendor.c +++ b/drivers/scsi/sr_vendor.c @@ -61,6 +61,7 @@ #define VENDOR_NEC 2 #define VENDOR_TOSHIBA 3 #define VENDOR_WRITER 4 /* pre-scsi3 writers */ +#define VENDOR_CYGNAL_85ED 5 /* CD-on-a-chip */ #define VENDOR_TIMEOUT 30*HZ @@ -99,6 +100,23 @@ void sr_vendor_init(Scsi_CD *cd) } else if (!strncmp(vendor, "TOSHIBA", 7)) { cd->vendor = VENDOR_TOSHIBA; + } else if (!strncmp(vendor, "Beurer", 6) && + !strncmp(model, "Gluco Memory", 12)) { + /* The Beurer GL50 evo uses a Cygnal-manufactured CD-on-a-chip + that only accepts a subset of SCSI commands. Most of the + not-implemented commands are fine to fail, but a few, + particularly around the MMC or Audio commands, will put the + device into an unrecoverable state, so they need to be + avoided at all costs.
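	   (Editorial addition to this excerpt, not in the patch: the mask
	   set just below therefore withholds multi-session, tray open/close,
	   door locking, generic packet and audio-play capabilities, i.e. the
	   command groups this comment flags as dangerous.)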
+ */ + cd->vendor = VENDOR_CYGNAL_85ED; + cd->cdi.mask |= ( + CDC_MULTI_SESSION | + CDC_CLOSE_TRAY | CDC_OPEN_TRAY | + CDC_LOCK | + CDC_GENERIC_PACKET | + CDC_PLAY_AUDIO + ); } #endif } diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index e3266a64a477..393f3019ccac 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c @@ -22,6 +22,7 @@ static const char *verstr = "20160209"; #include <linux/module.h> +#include <linux/compat.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched/signal.h> @@ -3500,7 +3501,7 @@ out: /* The ioctl command */ -static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) +static long st_ioctl_common(struct file *file, unsigned int cmd_in, void __user *p) { int i, cmd_nr, cmd_type, bt; int retval = 0; @@ -3508,7 +3509,6 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) struct scsi_tape *STp = file->private_data; struct st_modedef *STm; struct st_partstat *STps; - void __user *p = (void __user *)arg; if (mutex_lock_interruptible(&STp->lock)) return -ERESTARTSYS; @@ -3800,14 +3800,11 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) if (STp->cleaning_req) mt_status.mt_gstat |= GMT_CLN(0xffffffff); - i = copy_to_user(p, &mt_status, sizeof(struct mtget)); - if (i) { - retval = (-EFAULT); + retval = put_user_mtget(p, &mt_status); + if (retval) goto out; - } STp->recover_reg = 0; /* Clear after read */ - retval = 0; goto out; } /* End of MTIOCGET */ if (cmd_type == _IOC_TYPE(MTIOCPOS) && cmd_nr == _IOC_NR(MTIOCPOS)) { @@ -3821,16 +3818,24 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) goto out; } mt_pos.mt_blkno = blk; - i = copy_to_user(p, &mt_pos, sizeof(struct mtpos)); - if (i) - retval = (-EFAULT); + retval = put_user_mtpos(p, &mt_pos); goto out; } mutex_unlock(&STp->lock); switch (cmd_in) { + case SCSI_IOCTL_STOP_UNIT: + /* unload */ + retval = scsi_ioctl(STp->device, cmd_in, p); + if (!retval) { + STp->rew_at_close = 0; + STp->ready = ST_NO_TAPE; + } + return retval; + case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: break; + default: if ((cmd_in == SG_IO || cmd_in == SCSI_IOCTL_SEND_COMMAND || @@ -3844,30 +3849,46 @@ static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) return i; break; } - retval = scsi_ioctl(STp->device, cmd_in, p); - if (!retval && cmd_in == SCSI_IOCTL_STOP_UNIT) { /* unload */ - STp->rew_at_close = 0; - STp->ready = ST_NO_TAPE; - } - return retval; + return -ENOTTY; out: mutex_unlock(&STp->lock); return retval; } -#ifdef CONFIG_COMPAT -static long st_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +static long st_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) { + void __user *p = (void __user *)arg; struct scsi_tape *STp = file->private_data; - struct scsi_device *sdev = STp->device; - int ret = -ENOIOCTLCMD; - if (sdev->host->hostt->compat_ioctl) { + int ret; + + ret = st_ioctl_common(file, cmd_in, p); + if (ret != -ENOTTY) + return ret; + + return scsi_ioctl(STp->device, cmd_in, p); +} - ret = sdev->host->hostt->compat_ioctl(sdev, cmd, (void __user *)arg); +#ifdef CONFIG_COMPAT +static long st_compat_ioctl(struct file *file, unsigned int cmd_in, unsigned long arg) +{ + void __user *p = compat_ptr(arg); + struct scsi_tape *STp = file->private_data; + int ret; + /* argument conversion is handled using put_user_mtpos/put_user_mtget */ + switch (cmd_in) { + case MTIOCPOS32: + return st_ioctl_common(file, MTIOCPOS, p); + case 
MTIOCGET32: + return st_ioctl_common(file, MTIOCGET, p); } - return ret; + + ret = st_ioctl_common(file, cmd_in, p); + if (ret != -ENOTTY) + return ret; + + return scsi_compat_ioctl(STp->device, cmd_in, p); } #endif diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index ed8b9ac805e6..fb41636519ee 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1727,6 +1727,13 @@ static const struct hv_vmbus_device_id id_table[] = { MODULE_DEVICE_TABLE(vmbus, id_table); +static const struct { guid_t guid; } fc_guid = { HV_SYNTHFC_GUID }; + +static bool hv_dev_is_fc(struct hv_device *hv_dev) +{ + return guid_equal(&fc_guid.guid, &hv_dev->dev_type); +} + static int storvsc_probe(struct hv_device *device, const struct hv_vmbus_device_id *dev_id) { @@ -1835,10 +1842,11 @@ static int storvsc_probe(struct hv_device *device, */ host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); /* + * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting. */ - if (stor_device->num_sc != 0) - host->nr_hw_queues = stor_device->num_sc + 1; + if (!dev_is_ide) + host->nr_hw_queues = num_present_cpus(); /* * Set the error handler work queue. @@ -1935,11 +1943,45 @@ static int storvsc_remove(struct hv_device *dev) return 0; } +static int storvsc_suspend(struct hv_device *hv_dev) +{ + struct storvsc_device *stor_device = hv_get_drvdata(hv_dev); + struct Scsi_Host *host = stor_device->host; + struct hv_host_device *host_dev = shost_priv(host); + + storvsc_wait_to_drain(stor_device); + + drain_workqueue(host_dev->handle_error_wq); + + vmbus_close(hv_dev->channel); + + memset(stor_device->stor_chns, 0, + num_possible_cpus() * sizeof(void *)); + + kfree(stor_device->stor_chns); + stor_device->stor_chns = NULL; + + cpumask_clear(&stor_device->alloced_cpus); + + return 0; +} + +static int storvsc_resume(struct hv_device *hv_dev) +{ + int ret; + + ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size, + hv_dev_is_fc(hv_dev)); + return ret; +} + static struct hv_driver storvsc_drv = { .name = KBUILD_MODNAME, .id_table = id_table, .probe = storvsc_probe, .remove = storvsc_remove, + .suspend = storvsc_suspend, + .resume = storvsc_resume, .driver = { .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, diff --git a/drivers/scsi/sun3_scsi.c b/drivers/scsi/sun3_scsi.c index 3d80ab67a626..701b842296f0 100644 --- a/drivers/scsi/sun3_scsi.c +++ b/drivers/scsi/sun3_scsi.c @@ -397,10 +397,12 @@ static int sun3scsi_dma_finish(int write_flag) case CSR_LEFT_3: *vaddr = (dregs->bpack_lo & 0xff00) >> 8; vaddr--; + /* Fall through */ case CSR_LEFT_2: *vaddr = (dregs->bpack_hi & 0x00ff); vaddr--; + /* Fall through */ case CSR_LEFT_1: *vaddr = (dregs->bpack_hi & 0xff00) >> 8; @@ -499,7 +501,7 @@ static struct scsi_host_template sun3_scsi_template = { .eh_host_reset_handler = sun3scsi_host_reset, .can_queue = 16, .this_id = 7, - .sg_tablesize = SG_NONE, + .sg_tablesize = 1, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = NCR5380_CMD_SIZE, @@ -521,7 +523,7 @@ static int __init sun3_scsi_probe(struct platform_device *pdev) sun3_scsi_template.can_queue = setup_can_queue; if (setup_cmd_per_lun > 0) sun3_scsi_template.cmd_per_lun = setup_cmd_per_lun; - if (setup_sg_tablesize >= 0) + if (setup_sg_tablesize > 0) sun3_scsi_template.sg_tablesize = setup_sg_tablesize; if (setup_hostid >= 0) sun3_scsi_template.this_id = setup_hostid & 7; diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c index 440a73eae647..f37df79e37e1 100644 --- 
a/drivers/scsi/sun3x_esp.c +++ b/drivers/scsi/sun3x_esp.c @@ -190,7 +190,7 @@ static int esp_sun3x_probe(struct platform_device *dev) if (!res || !res->start) goto fail_unlink; - esp->regs = ioremap_nocache(res->start, 0x20); + esp->regs = ioremap(res->start, 0x20); if (!esp->regs) goto fail_unmap_regs; @@ -198,7 +198,7 @@ static int esp_sun3x_probe(struct platform_device *dev) if (!res || !res->start) goto fail_unmap_regs; - esp->dma_regs = ioremap_nocache(res->start, 0x10); + esp->dma_regs = ioremap(res->start, 0x10); esp->command_block = dma_alloc_coherent(esp->dev, 16, &esp->command_block_dma, diff --git a/drivers/scsi/sym53c8xx_2/sym_nvram.c b/drivers/scsi/sym53c8xx_2/sym_nvram.c index dd3f07b31612..d37e2a69136a 100644 --- a/drivers/scsi/sym53c8xx_2/sym_nvram.c +++ b/drivers/scsi/sym53c8xx_2/sym_nvram.c @@ -227,7 +227,7 @@ static void sym_display_Tekram_nvram(struct sym_device *np, Tekram_nvram *nvram) /* * 24C16 EEPROM reading. * - * GPOI0 - data in/data out + * GPIO0 - data in/data out * GPIO1 - clock * Symbios NVRAM wiring now also used by Tekram. */ @@ -524,7 +524,7 @@ static int sym_read_Symbios_nvram(struct sym_device *np, Symbios_nvram *nvram) /* * 93C46 EEPROM reading. * - * GPOI0 - data in + * GPIO0 - data in * GPIO1 - data out * GPIO2 - clock * GPIO4 - chip select @@ -648,7 +648,7 @@ static int sym_read_T93C46_nvram(struct sym_device *np, Tekram_nvram *nvram) { u_char gpcntl, gpreg; u_char old_gpcntl, old_gpreg; - int retv = 1; + int retv; /* save current state of GPCNTL and GPREG */ old_gpreg = INB(np, nc_gpreg); diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 0b845ab7c3bf..d14c2243e02a 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -132,6 +132,16 @@ config SCSI_UFS_HISI Select this if you have a UFS controller on a Hisilicon chipset. If unsure, say N. +config SCSI_UFS_TI_J721E + tristate "TI glue layer for Cadence UFS Controller" + depends on OF && HAS_IOMEM && (ARCH_K3 || COMPILE_TEST) + help + This selects the TI glue layer driver for the Cadence UFS Host + Controller IP. + + Select this if you have a TI platform with a UFS controller. + If unsure, say N. 
+ config SCSI_UFS_BSG bool "Universal Flash Storage BSG device node" depends on SCSI_UFSHCD diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile index 2a9097939bcb..94c6c5d7334b 100644 --- a/drivers/scsi/ufs/Makefile +++ b/drivers/scsi/ufs/Makefile @@ -11,3 +11,4 @@ obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o obj-$(CONFIG_SCSI_UFS_MEDIATEK) += ufs-mediatek.o +obj-$(CONFIG_SCSI_UFS_TI_J721E) += ti-j721e-ufs.o diff --git a/drivers/scsi/ufs/cdns-pltfrm.c b/drivers/scsi/ufs/cdns-pltfrm.c index 86dbb723f3ac..56a6a1ed5ec2 100644 --- a/drivers/scsi/ufs/cdns-pltfrm.c +++ b/drivers/scsi/ufs/cdns-pltfrm.c @@ -19,6 +19,85 @@ #define CDNS_UFS_REG_HCLKDIV 0xFC #define CDNS_UFS_REG_PHY_XCFGD1 0x113C +#define CDNS_UFS_MAX_L4_ATTRS 12 + +struct cdns_ufs_host { + /** + * cdns_ufs_dme_attr_val - for storing L4 attributes + */ + u32 cdns_ufs_dme_attr_val[CDNS_UFS_MAX_L4_ATTRS]; +}; + +/** + * cdns_ufs_get_l4_attr - get L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_get_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERDEVICEID), + &host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERCPORTID), + &host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + &host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PROTOCOLID), + &host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTFLAGS), + &host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + &host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + &host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + &host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CPORTMODE), + &host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_get(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + &host->cdns_ufs_dme_attr_val[11]); +} + +/** + * cdns_ufs_set_l4_attr - set L4 attributes on local side + * @hba: per adapter instance + * + */ +static void cdns_ufs_set_l4_attr(struct ufs_hba *hba) +{ + struct cdns_ufs_host *host = ufshcd_get_variant(hba); + + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), 0); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERDEVICEID), + host->cdns_ufs_dme_attr_val[0]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERCPORTID), + host->cdns_ufs_dme_attr_val[1]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TRAFFICCLASS), + host->cdns_ufs_dme_attr_val[2]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PROTOCOLID), + host->cdns_ufs_dme_attr_val[3]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTFLAGS), + host->cdns_ufs_dme_attr_val[4]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_TXTOKENVALUE), + host->cdns_ufs_dme_attr_val[5]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_RXTOKENVALUE), + host->cdns_ufs_dme_attr_val[6]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_LOCALBUFFERSPACE), + host->cdns_ufs_dme_attr_val[7]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_PEERBUFFERSPACE), + host->cdns_ufs_dme_attr_val[8]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CREDITSTOSEND), + host->cdns_ufs_dme_attr_val[9]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CPORTMODE), + host->cdns_ufs_dme_attr_val[10]); + ufshcd_dme_set(hba, UIC_ARG_MIB(T_CONNECTIONSTATE), + 
host->cdns_ufs_dme_attr_val[11]); +} /** * Sets HCLKDIV register value based on the core_clk @@ -62,23 +141,69 @@ static int cdns_ufs_set_hclkdiv(struct ufs_hba *hba) } /** - * Sets clocks used by the controller + * Called before and after HCE enable bit is set. * @hba: host controller instance - * @on: if true, enable clocks, otherwise disable * @status: notify stage (pre, post change) * * Return zero for success and non-zero for failure */ -static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on, - enum ufs_notify_change_status status) +static int cdns_ufs_hce_enable_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) { - if ((!on) || (status == PRE_CHANGE)) + if (status != PRE_CHANGE) return 0; return cdns_ufs_set_hclkdiv(hba); } /** + * Called around hibern8 enter/exit. + * @hba: host controller instance + * @cmd: UIC Command + * @status: notify stage (pre, post change) + * + */ +static void cdns_ufs_hibern8_notify(struct ufs_hba *hba, enum uic_cmd_dme cmd, + enum ufs_notify_change_status status) +{ + if (status == PRE_CHANGE && cmd == UIC_CMD_DME_HIBER_ENTER) + cdns_ufs_get_l4_attr(hba); + if (status == POST_CHANGE && cmd == UIC_CMD_DME_HIBER_EXIT) + cdns_ufs_set_l4_attr(hba); +} + +/** + * Called before and after Link startup is carried out. + * @hba: host controller instance + * @status: notify stage (pre, post change) + * + * Return zero for success and non-zero for failure + */ +static int cdns_ufs_link_startup_notify(struct ufs_hba *hba, + enum ufs_notify_change_status status) +{ + if (status != PRE_CHANGE) + return 0; + + /* + * Some UFS devices have issues if LCC is enabled. + * So set PA_Local_TX_LCC_Enable to 0 before link startup, + * which makes sure that both host and device TX LCC are + * disabled once link startup is completed. + */ + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0); + + /* + * Disable the Autohibern8 feature in the Cadence UFS + * controller to mask unexpected interrupt triggers. 
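+ * An AHIT value of 0 keeps the Auto-Hibernate idle timer disabled.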
+ */ + hba->ahit = 0; + + return 0; +} + +/** * cdns_ufs_init - performs additional ufs initialization * @hba: host controller instance * @@ -87,6 +212,14 @@ static int cdns_ufs_setup_clocks(struct ufs_hba *hba, bool on, static int cdns_ufs_init(struct ufs_hba *hba) { int status = 0; + struct cdns_ufs_host *host; + struct device *dev = hba->dev; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + + if (!host) + return -ENOMEM; + ufshcd_set_variant(hba, host); if (hba->vops && hba->vops->phy_initialization) status = hba->vops->phy_initialization(hba); @@ -114,14 +247,19 @@ static int cdns_ufs_m31_16nm_phy_initialization(struct ufs_hba *hba) static const struct ufs_hba_variant_ops cdns_ufs_pltfm_hba_vops = { .name = "cdns-ufs-pltfm", - .setup_clocks = cdns_ufs_setup_clocks, + .init = cdns_ufs_init, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, + .hibern8_notify = cdns_ufs_hibern8_notify, }; static const struct ufs_hba_variant_ops cdns_ufs_m31_16nm_pltfm_hba_vops = { .name = "cdns-ufs-pltfm", .init = cdns_ufs_init, - .setup_clocks = cdns_ufs_setup_clocks, + .hce_enable_notify = cdns_ufs_hce_enable_notify, + .link_startup_notify = cdns_ufs_link_startup_notify, .phy_initialization = cdns_ufs_m31_16nm_phy_initialization, + .hibern8_notify = cdns_ufs_hibern8_notify, }; static const struct of_device_id cdns_ufs_of_match[] = { @@ -187,6 +325,7 @@ static const struct dev_pm_ops cdns_ufs_dev_pm_ops = { static struct platform_driver cdns_ufs_pltfrm_driver = { .probe = cdns_ufs_pltfrm_probe, .remove = cdns_ufs_pltfrm_remove, + .shutdown = ufshcd_pltfrm_shutdown, .driver = { .name = "cdns-ufshcd", .pm = &cdns_ufs_dev_pm_ops, diff --git a/drivers/scsi/ufs/ti-j721e-ufs.c b/drivers/scsi/ufs/ti-j721e-ufs.c new file mode 100644 index 000000000000..5216d228cdd9 --- /dev/null +++ b/drivers/scsi/ufs/ti-j721e-ufs.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/ +// + +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> + +#define TI_UFS_SS_CTRL 0x4 +#define TI_UFS_SS_RST_N_PCS BIT(0) +#define TI_UFS_SS_CLK_26MHZ BIT(4) + +static int ti_j721e_ufs_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + unsigned long clk_rate; + void __iomem *regbase; + struct clk *clk; + u32 reg = 0; + int ret; + + regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(regbase)) + return PTR_ERR(regbase); + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + pm_runtime_put_noidle(dev); + return ret; + } + + /* Select MPHY refclk frequency */ + clk = devm_clk_get(dev, NULL); + if (IS_ERR(clk)) { + dev_err(dev, "Cannot claim MPHY clock.\n"); + return PTR_ERR(clk); + } + clk_rate = clk_get_rate(clk); + if (clk_rate == 26000000) + reg |= TI_UFS_SS_CLK_26MHZ; + devm_clk_put(dev, clk); + + /* Take UFS slave device out of reset */ + reg |= TI_UFS_SS_RST_N_PCS; + writel(reg, regbase + TI_UFS_SS_CTRL); + + ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, + dev); + if (ret) { + dev_err(dev, "failed to populate child nodes %d\n", ret); + pm_runtime_put_sync(dev); + } + + return ret; +} + +static int ti_j721e_ufs_remove(struct platform_device *pdev) +{ + of_platform_depopulate(&pdev->dev); + pm_runtime_put_sync(&pdev->dev); + + return 0; +} + +static const struct of_device_id 
ti_j721e_ufs_of_match[] = { + { + .compatible = "ti,j721e-ufs", + }, + { }, +}; + +static struct platform_driver ti_j721e_ufs_driver = { + .probe = ti_j721e_ufs_probe, + .remove = ti_j721e_ufs_remove, + .driver = { + .name = "ti-j721e-ufs", + .of_match_table = ti_j721e_ufs_of_match, + }, +}; +module_platform_driver(ti_j721e_ufs_driver); + +MODULE_AUTHOR("Vignesh Raghavendra <vigneshr@ti.com>"); +MODULE_DESCRIPTION("TI UFS host controller glue driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c index f4d1dca962c4..5d6487350a6c 100644 --- a/drivers/scsi/ufs/ufs-hisi.c +++ b/drivers/scsi/ufs/ufs-hisi.c @@ -447,17 +447,12 @@ static int ufs_hisi_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) static int ufs_hisi_get_resource(struct ufs_hisi_host *host) { - struct resource *mem_res; struct device *dev = host->hba->dev; struct platform_device *pdev = to_platform_device(dev); /* get resource of ufs sys ctrl */ - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - host->ufs_sys_ctrl = devm_ioremap_resource(dev, mem_res); - if (IS_ERR(host->ufs_sys_ctrl)) - return PTR_ERR(host->ufs_sys_ctrl); - - return 0; + host->ufs_sys_ctrl = devm_platform_ioremap_resource(pdev, 1); + return PTR_ERR_OR_ZERO(host->ufs_sys_ctrl); } static void ufs_hisi_set_pm_lvl(struct ufs_hba *hba) diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c index 0f6ff33ce52e..53eae5fe2ade 100644 --- a/drivers/scsi/ufs/ufs-mediatek.c +++ b/drivers/scsi/ufs/ufs-mediatek.c @@ -6,16 +6,30 @@ * Peter Wang <peter.wang@mediatek.com> */ +#include <linux/arm-smccc.h> +#include <linux/bitfield.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> #include "ufshcd.h" #include "ufshcd-pltfrm.h" +#include "ufs_quirks.h" #include "unipro.h" #include "ufs-mediatek.h" +#define ufs_mtk_smc(cmd, val, res) \ + arm_smccc_smc(MTK_SIP_UFS_CONTROL, \ + cmd, val, 0, 0, 0, 0, 0, &(res)) + +#define ufs_mtk_ref_clk_notify(on, res) \ + ufs_mtk_smc(UFS_MTK_SIP_REF_CLK_NOTIFICATION, on, res) + +#define ufs_mtk_device_reset_ctrl(high, res) \ + ufs_mtk_smc(UFS_MTK_SIP_DEVICE_RESET, high, res) + static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) { u32 tmp; @@ -81,6 +95,49 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba) return err; } +static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) +{ + struct ufs_mtk_host *host = ufshcd_get_variant(hba); + struct arm_smccc_res res; + unsigned long timeout; + u32 value; + + if (host->ref_clk_enabled == on) + return 0; + + if (on) { + ufs_mtk_ref_clk_notify(on, res); + ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); + } else { + ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); + } + + /* Wait for ack */ + timeout = jiffies + msecs_to_jiffies(REFCLK_REQ_TIMEOUT_MS); + do { + value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); + + /* Wait until the ack bit equals the req bit */ + if (((value & REFCLK_ACK) >> 1) == (value & REFCLK_REQUEST)) + goto out; + + usleep_range(100, 200); + } while (time_before(jiffies, timeout)); + + dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); + + ufs_mtk_ref_clk_notify(host->ref_clk_enabled, res); + + return -ETIMEDOUT; + +out: + host->ref_clk_enabled = on; + if (!on) + ufs_mtk_ref_clk_notify(on, res); + + return 0; +} + /** * ufs_mtk_setup_clocks - enables/disables clocks * @hba: host controller instance @@ -105,12 +162,16 @@ static int 
ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, switch (status) { case PRE_CHANGE: - if (!on) + if (!on) { + ufs_mtk_setup_ref_clk(hba, on); ret = phy_power_off(host->mphy); + } break; case POST_CHANGE: - if (on) + if (on) { ret = phy_power_on(host->mphy); + ufs_mtk_setup_ref_clk(hba, on); + } break; } @@ -147,6 +208,12 @@ static int ufs_mtk_init(struct ufs_hba *hba) if (err) goto out_variant_clear; + /* Enable runtime autosuspend */ + hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; + + /* Enable clock-gating */ + hba->caps |= UFSHCD_CAP_CLK_GATING; + /* * ufshcd_vops_init() is invoked after * ufshcd_setup_clock(true) in ufshcd_hba_init() thus @@ -235,6 +302,23 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba) return ret; } +static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) +{ + unsigned long flags; + u32 ah_ms; + + if (ufshcd_is_clkgating_allowed(hba)) { + if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) + ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, + hba->ahit); + else + ah_ms = 10; + spin_lock_irqsave(hba->host->host_lock, flags); + hba->clk_gating.delay_ms = ah_ms + 5; + spin_unlock_irqrestore(hba->host->host_lock, flags); + } +} + static int ufs_mtk_post_link(struct ufs_hba *hba) { /* disable device LCC */ @@ -243,6 +327,15 @@ static int ufs_mtk_post_link(struct ufs_hba *hba) /* enable unipro clock gating feature */ ufs_mtk_cfg_unipro_cg(hba, true); + /* configure auto-hibern8 timer to 10ms */ + if (ufshcd_is_auto_hibern8_supported(hba)) { + ufshcd_auto_hibern8_update(hba, + FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | + FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3)); + } + + ufs_mtk_setup_clk_gating(hba); + return 0; } @@ -266,12 +359,86 @@ static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, return ret; } +static void ufs_mtk_device_reset(struct ufs_hba *hba) +{ + struct arm_smccc_res res; + + ufs_mtk_device_reset_ctrl(0, res); + + /* + * The reset signal is active low. UFS devices shall detect + * a positive or negative RST_n pulse that is at least 1us + * wide. + * + * To be on the safe side, keep the reset low for at least 10us. 
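+ * (10us gives a 10x margin over the 1us minimum pulse width.)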
+ */ + usleep_range(10, 15); + + ufs_mtk_device_reset_ctrl(1, res); + + /* Some devices may need time to respond to rst_n */ + usleep_range(10000, 15000); + + dev_info(hba->dev, "device reset done\n"); +} + +static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_hba_enable(hba); + if (err) + return err; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 0); + if (err) + return err; + + err = ufshcd_uic_hibern8_exit(hba); + if (!err) + ufshcd_set_link_active(hba); + else + return err; + + err = ufshcd_make_hba_operational(hba); + if (err) + return err; + + return 0; +} + +static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) +{ + int err; + + err = ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 1); + if (err) { + /* Resume UniPro state for following error recovery */ + ufshcd_dme_set(hba, + UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0), + 0); + return err; + } + + return 0; +} + static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) { + int err; struct ufs_mtk_host *host = ufshcd_get_variant(hba); - if (ufshcd_is_link_hibern8(hba)) + if (ufshcd_is_link_hibern8(hba)) { + err = ufs_mtk_link_set_lpm(hba); + if (err) + return -EAGAIN; phy_power_off(host->mphy); + ufs_mtk_setup_ref_clk(hba, false); + } return 0; } @@ -279,9 +446,40 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op) static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) { struct ufs_mtk_host *host = ufshcd_get_variant(hba); + int err; - if (ufshcd_is_link_hibern8(hba)) + if (ufshcd_is_link_hibern8(hba)) { + ufs_mtk_setup_ref_clk(hba, true); phy_power_on(host->mphy); + err = ufs_mtk_link_set_hpm(hba); + if (err) + return err; + } + + return 0; +} + +static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) +{ + ufshcd_dump_regs(hba, REG_UFS_REFCLK_CTRL, 0x4, "Ref-Clk Ctrl "); + + ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg "); + + ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL, + REG_UFS_REJECT_MON - REG_UFS_MPHYCTRL + 4, + "MPHY Ctrl "); + + /* Direct debugging information to REG_MTK_PROBE */ + ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); + ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); +} + +static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + + if (dev_info->wmanufacturerid == UFS_VENDOR_SAMSUNG) + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); return 0; } @@ -298,8 +496,11 @@ static struct ufs_hba_variant_ops ufs_hba_mtk_vops = { .setup_clocks = ufs_mtk_setup_clocks, .link_startup_notify = ufs_mtk_link_startup_notify, .pwr_change_notify = ufs_mtk_pwr_change_notify, + .apply_dev_quirks = ufs_mtk_apply_dev_quirks, .suspend = ufs_mtk_suspend, .resume = ufs_mtk_resume, + .dbg_register_dump = ufs_mtk_dbg_register_dump, + .device_reset = ufs_mtk_device_reset, }; /** diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h index 19f8c42fe06f..fccdd979d6fb 100644 --- a/drivers/scsi/ufs/ufs-mediatek.h +++ b/drivers/scsi/ufs/ufs-mediatek.h @@ -6,6 +6,30 @@ #ifndef _UFS_MEDIATEK_H #define _UFS_MEDIATEK_H +#include <linux/bitops.h> +#include <linux/soc/mediatek/mtk_sip_svc.h> + +/* + * Vendor specific UFSHCI Registers + */ +#define REG_UFS_REFCLK_CTRL 0x144 +#define REG_UFS_EXTREG 0x2100 +#define REG_UFS_MPHYCTRL 0x2200 +#define REG_UFS_REJECT_MON 0x22AC +#define REG_UFS_DEBUG_SEL 0x22C0 +#define REG_UFS_PROBE 0x22C8 + +/* + * Ref-clk control + * + * Values for register REG_UFS_REFCLK_CTRL + */ +#define 
REFCLK_RELEASE 0x0 +#define REFCLK_REQUEST BIT(0) +#define REFCLK_ACK BIT(1) + +#define REFCLK_REQ_TIMEOUT_MS 3 + /* * Vendor specific pre-defined parameters */ @@ -30,6 +54,13 @@ #define VS_UNIPROPOWERDOWNCONTROL 0xD0A8 /* + * SiP commands + */ +#define MTK_SIP_UFS_CONTROL MTK_SIP_SMC_CMD(0x276) +#define UFS_MTK_SIP_DEVICE_RESET BIT(1) +#define UFS_MTK_SIP_REF_CLK_NOTIFICATION BIT(3) + +/* * VS_DEBUGCLOCKENABLE */ enum { @@ -48,6 +79,7 @@ enum { struct ufs_mtk_host { struct ufs_hba *hba; struct phy *mphy; + bool ref_clk_enabled; }; #endif /* !_UFS_MEDIATEK_H */ diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c index ee4b1da1e223..c69c29a1ceb9 100644 --- a/drivers/scsi/ufs/ufs-qcom.c +++ b/drivers/scsi/ufs/ufs-qcom.c @@ -8,6 +8,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> +#include <linux/gpio/consumer.h> #include <linux/reset-controller.h> #include "ufshcd.h" @@ -245,6 +246,44 @@ static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host) mb(); } +/** + * ufs_qcom_host_reset - reset host controller and PHY + * @hba: per-adapter instance + */ +static int ufs_qcom_host_reset(struct ufs_hba *hba) +{ + int ret = 0; + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + if (!host->core_reset) { + dev_warn(hba->dev, "%s: reset control not set\n", __func__); + goto out; + } + + ret = reset_control_assert(host->core_reset); + if (ret) { + dev_err(hba->dev, "%s: core_reset assert failed, err = %d\n", + __func__, ret); + goto out; + } + + /* + * The hardware requirement for delay between assert/deassert + * is at least 3-4 sleep clock (32.768 kHz) cycles, which comes to + * ~125us (4/32768). To be on the safe side, add a 200us delay. + */ + usleep_range(200, 210); + + ret = reset_control_deassert(host->core_reset); + if (ret) + dev_err(hba->dev, "%s: core_reset deassert failed, err = %d\n", + __func__, ret); + + usleep_range(1000, 1100); + +out: + return ret; +} + static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); @@ -253,6 +292,12 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba) bool is_rate_B = (UFS_QCOM_LIMIT_HS_RATE == PA_HS_MODE_B) ? true : false; + /* Reset UFS Host Controller and PHY */ + ret = ufs_qcom_host_reset(hba); + if (ret) + dev_warn(hba->dev, "%s: host reset returned %d\n", + __func__, ret); + if (is_rate_B) phy_set_mode(phy, PHY_MODE_UFS_HS_B); @@ -800,7 +845,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, struct ufs_pa_layer_attr *dev_max_params, struct ufs_pa_layer_attr *dev_req_params) { - u32 val; struct ufs_qcom_host *host = ufshcd_get_variant(hba); struct ufs_dev_params ufs_qcom_cap; int ret = 0; @@ -869,8 +913,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba, ret = -EINVAL; } - val = ~(MAX_U32 << dev_req_params->lane_tx); - /* cache the power mode parameters to use internally */ memcpy(&host->dev_req_params, dev_req_params, sizeof(*dev_req_params)); @@ -1103,6 +1145,15 @@ static int ufs_qcom_init(struct ufs_hba *hba) host->hba = hba; ufshcd_set_variant(hba, host); + /* Setup the reset control of HCI */ + host->core_reset = devm_reset_control_get(hba->dev, "rst"); + if (IS_ERR(host->core_reset)) { + err = PTR_ERR(host->core_reset); + dev_warn(dev, "Failed to get reset control %d\n", err); + host->core_reset = NULL; + err = 0; + } + /* Fire up the reset controller. Failure here is non-fatal. 
*/ host->rcdev.of_node = dev->of_node; host->rcdev.ops = &ufs_qcom_reset_ops; @@ -1140,6 +1191,15 @@ static int ufs_qcom_init(struct ufs_hba *hba) } } + host->device_reset = devm_gpiod_get_optional(dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(host->device_reset)) { + err = PTR_ERR(host->device_reset); + if (err != -EPROBE_DEFER) + dev_err(dev, "failed to acquire reset gpio: %d\n", err); + goto out_variant_clear; + } + err = ufs_qcom_bus_register(host); if (err) goto out_variant_clear; @@ -1546,12 +1606,37 @@ static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba) } /** + * ufs_qcom_device_reset() - toggle the (optional) device reset line + * @hba: per-adapter instance + * + * Toggles the (optional) reset line to reset the attached device. + */ +static void ufs_qcom_device_reset(struct ufs_hba *hba) +{ + struct ufs_qcom_host *host = ufshcd_get_variant(hba); + + /* reset gpio is optional */ + if (!host->device_reset) + return; + + /* + * The UFS device shall detect reset pulses of 1us, sleep for 10us to + * be on the safe side. + */ + gpiod_set_value_cansleep(host->device_reset, 1); + usleep_range(10, 15); + + gpiod_set_value_cansleep(host->device_reset, 0); + usleep_range(10, 15); +} + +/** * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * * The variant operations configure the necessary controller and PHY * handshake during initialization. */ -static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { +static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .name = "qcom", .init = ufs_qcom_init, .exit = ufs_qcom_exit, @@ -1565,6 +1650,7 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .suspend = ufs_qcom_suspend, .resume = ufs_qcom_resume, .dbg_register_dump = ufs_qcom_dump_dbg_regs, + .device_reset = ufs_qcom_device_reset, }; /** diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h index 001915d1e0e4..2d95e7cc7187 100644 --- a/drivers/scsi/ufs/ufs-qcom.h +++ b/drivers/scsi/ufs/ufs-qcom.h @@ -6,6 +6,7 @@ #define UFS_QCOM_H_ #include <linux/reset-controller.h> +#include <linux/reset.h> #define MAX_UFS_QCOM_HOSTS 1 #define MAX_U32 (~(u32)0) @@ -195,6 +196,8 @@ struct ufs_qcom_testbus { u8 select_minor; }; +struct gpio_desc; + struct ufs_qcom_host { /* * Set this capability if host controller supports the QUniPro mode @@ -231,7 +234,11 @@ struct ufs_qcom_host { u32 dbg_print_en; struct ufs_qcom_testbus testbus; + /* Reset control of HCI */ + struct reset_control *core_reset; struct reset_controller_dev rcdev; + + struct gpio_desc *device_reset; }; static inline u32 diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c index f478685122ff..dbdf8b01abed 100644 --- a/drivers/scsi/ufs/ufs-sysfs.c +++ b/drivers/scsi/ufs/ufs-sysfs.c @@ -118,23 +118,6 @@ static ssize_t spm_target_link_state_show(struct device *dev, ufs_pm_lvl_states[hba->spm_lvl].link_state)); } -static void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) -{ - unsigned long flags; - - if (!ufshcd_is_auto_hibern8_supported(hba)) - return; - - spin_lock_irqsave(hba->host->host_lock, flags); - if (hba->ahit == ahit) - goto out_unlock; - hba->ahit = ahit; - if (!pm_runtime_suspended(hba->dev)) - ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); -out_unlock: - spin_unlock_irqrestore(hba->host->host_lock, flags); -} - /* Convert Auto-Hibernate Idle Timer register value to microseconds */ static int ufshcd_ahit_to_us(u32 ahit) { @@ -571,9 +554,10 @@ static ssize_t _name##_show(struct device *dev, \ int ret; \ int desc_len = QUERY_DESC_MAX_SIZE; \ u8 
*desc_buf; \ + \ desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \ - if (!desc_buf) \ - return -ENOMEM; \ + if (!desc_buf) \ + return -ENOMEM; \ ret = ufshcd_query_descriptor_retry(hba, \ UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \ 0, 0, desc_buf, &desc_len); \ @@ -582,14 +566,13 @@ static ssize_t _name##_show(struct device *dev, \ goto out; \ } \ index = desc_buf[DEVICE_DESC_PARAM##_pname]; \ - memset(desc_buf, 0, QUERY_DESC_MAX_SIZE); \ - if (ufshcd_read_string_desc(hba, index, desc_buf, \ - QUERY_DESC_MAX_SIZE, true)) { \ - ret = -EINVAL; \ + kfree(desc_buf); \ + desc_buf = NULL; \ + ret = ufshcd_read_string_desc(hba, index, &desc_buf, \ + SD_ASCII_STD); \ + if (ret < 0) \ goto out; \ - } \ - ret = snprintf(buf, PAGE_SIZE, "%s\n", \ - desc_buf + QUERY_DESC_HDR_SIZE); \ + ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \ out: \ kfree(desc_buf); \ return ret; \ @@ -730,7 +713,7 @@ static ssize_t _pname##_show(struct device *dev, \ struct scsi_device *sdev = to_scsi_device(dev); \ struct ufs_hba *hba = shost_priv(sdev->host); \ u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \ - if (!ufs_is_valid_unit_desc_lun(lun)) \ + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \ return -EINVAL; \ return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \ lun, _duname##_DESC_PARAM##_puname, buf, _size); \ diff --git a/drivers/scsi/ufs/ufs-sysfs.h b/drivers/scsi/ufs/ufs-sysfs.h index e5621e59a432..0f4e750a6748 100644 --- a/drivers/scsi/ufs/ufs-sysfs.h +++ b/drivers/scsi/ufs/ufs-sysfs.h @@ -1,5 +1,5 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Copyright (C) 2018 Western Digital Corporation +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2018 Western Digital Corporation */ #ifndef __UFS_SYSFS_H__ diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index 99a9c4d16f6b..cfe380348bf0 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h @@ -63,7 +63,6 @@ #define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F #define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID) #define UFS_UPIU_WLUN_ID (1 << 7) -#define UFS_UPIU_MAX_GENERAL_LUN 8 /* Well known logical unit id in LUN field of UPIU */ enum { @@ -499,9 +498,9 @@ struct ufs_query_res { #define UFS_VREG_VCC_MAX_UV 3600000 /* uV */ #define UFS_VREG_VCC_1P8_MIN_UV 1700000 /* uV */ #define UFS_VREG_VCC_1P8_MAX_UV 1950000 /* uV */ -#define UFS_VREG_VCCQ_MIN_UV 1100000 /* uV */ -#define UFS_VREG_VCCQ_MAX_UV 1300000 /* uV */ -#define UFS_VREG_VCCQ2_MIN_UV 1650000 /* uV */ +#define UFS_VREG_VCCQ_MIN_UV 1140000 /* uV */ +#define UFS_VREG_VCCQ_MAX_UV 1260000 /* uV */ +#define UFS_VREG_VCCQ2_MIN_UV 1700000 /* uV */ #define UFS_VREG_VCCQ2_MAX_UV 1950000 /* uV */ /* @@ -530,28 +529,28 @@ struct ufs_dev_info { bool f_power_on_wp_en; /* Keeps information if any of the LU is power on write protected */ bool is_lu_power_on_wp; -}; - -#define MAX_MODEL_LEN 16 -/** - * ufs_dev_desc - ufs device details from the device descriptor - * - * @wmanufacturerid: card details - * @model: card model - */ -struct ufs_dev_desc { + /* Maximum number of general LU supported by the UFS device */ + u8 max_lu_supported; u16 wmanufacturerid; - char model[MAX_MODEL_LEN + 1]; + /* UFS device Product Name */ + u8 *model; }; /** * ufs_is_valid_unit_desc_lun - checks if the given LUN has a unit descriptor + * @dev_info: pointer to the struct ufs_dev_info instance + * @lun: LU number to check * @return: true if the lun has a matching unit descriptor, false otherwise */ -static inline bool ufs_is_valid_unit_desc_lun(u8 lun) +static inline bool 
ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, + u8 lun) { - return lun == UFS_UPIU_RPMB_WLUN || (lun < UFS_UPIU_MAX_GENERAL_LUN); + if (!dev_info || !dev_info->max_lu_supported) { + pr_err("Max General LU supported by UFS isn't initialized\n"); + return false; + } + + return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported); } #endif /* End of Header */ diff --git a/drivers/scsi/ufs/ufs_bsg.c b/drivers/scsi/ufs/ufs_bsg.c index a9344eb4e047..53dd87628cbe 100644 --- a/drivers/scsi/ufs/ufs_bsg.c +++ b/drivers/scsi/ufs/ufs_bsg.c @@ -98,6 +98,8 @@ static int ufs_bsg_request(struct bsg_job *job) bsg_reply->reply_payload_rcv_len = 0; + pm_runtime_get_sync(hba->dev); + msgcode = bsg_request->msgcode; switch (msgcode) { case UPIU_TRANSACTION_QUERY_REQ: @@ -135,6 +137,8 @@ static int ufs_bsg_request(struct bsg_job *job) break; } + pm_runtime_put_sync(hba->dev); + if (!desc_buff) goto out; @@ -158,6 +162,7 @@ out: /** * ufs_bsg_remove - detach and remove the added ufs-bsg node + * @hba: per adapter object * * Should be called when unloading the driver. */ @@ -198,7 +203,7 @@ int ufs_bsg_probe(struct ufs_hba *hba) bsg_dev->parent = get_device(parent); bsg_dev->release = ufs_bsg_node_release; - dev_set_name(bsg_dev, "ufs-bsg"); + dev_set_name(bsg_dev, "ufs-bsg%u", shost->host_no); ret = device_add(bsg_dev); if (ret) diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h index fe6cad9b2a0d..d0ab147f98d3 100644 --- a/drivers/scsi/ufs/ufs_quirks.h +++ b/drivers/scsi/ufs/ufs_quirks.h @@ -22,16 +22,17 @@ * @quirk: device quirk */ struct ufs_dev_fix { - struct ufs_dev_desc card; + u16 wmanufacturerid; + u8 *model; unsigned int quirk; }; -#define END_FIX { { 0 }, 0 } +#define END_FIX { } /* add specific device quirk */ #define UFS_FIX(_vendor, _model, _quirk) { \ - .card.wmanufacturerid = (_vendor),\ - .card.model = (_model), \ + .wmanufacturerid = (_vendor),\ + .model = (_model), \ .quirk = (_quirk), \ } diff --git a/drivers/scsi/ufs/ufshcd-dwc.c b/drivers/scsi/ufs/ufshcd-dwc.c index fb9e2ff4f8d2..6a901da2d15a 100644 --- a/drivers/scsi/ufs/ufshcd-dwc.c +++ b/drivers/scsi/ufs/ufshcd-dwc.c @@ -80,7 +80,7 @@ static int ufshcd_dwc_link_is_up(struct ufs_hba *hba) */ static int ufshcd_dwc_connection_setup(struct ufs_hba *hba) { - const struct ufshcd_dme_attr_val setup_attrs[] = { + static const struct ufshcd_dme_attr_val setup_attrs[] = { { UIC_ARG_MIB(T_CONNECTIONSTATE), 0, DME_LOCAL }, { UIC_ARG_MIB(N_DEVICEID), 0, DME_LOCAL }, { UIC_ARG_MIB(N_DEVICEID_VALID), 0, DME_LOCAL }, diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c index d7d521b394c3..76f9be71c31b 100644 --- a/drivers/scsi/ufs/ufshcd-pltfrm.c +++ b/drivers/scsi/ufs/ufshcd-pltfrm.c @@ -391,12 +391,10 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, { struct ufs_hba *hba; void __iomem *mmio_base; - struct resource *mem_res; int irq, err; struct device *dev = &pdev->dev; - mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mmio_base = devm_ioremap_resource(dev, mem_res); + mmio_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mmio_base)) { err = PTR_ERR(mmio_base); goto out; @@ -404,7 +402,6 @@ int ufshcd_pltfrm_init(struct platform_device *pdev, irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(dev, "IRQ resource not available\n"); err = -ENODEV; goto out; } diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index e274053109d0..abd0e6b05f79 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -88,6 +88,9 @@ 
/* Interrupt aggregation default timeout, unit: 40us */ #define INT_AGGR_DEF_TO 0x02 +/* default delay of autosuspend: 2000 ms */ +#define RPM_AUTOSUSPEND_DELAY_MS 2000 + #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ ({ \ int _ret; \ @@ -114,7 +117,7 @@ int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, if (offset % 4 != 0 || len % 4 != 0) /* keep readl happy */ return -EINVAL; - regs = kzalloc(len, GFP_KERNEL); + regs = kzalloc(len, GFP_ATOMIC); if (!regs) return -ENOMEM; @@ -237,17 +240,16 @@ static struct ufs_dev_fix ufs_fixups[] = { END_FIX }; -static void ufshcd_tmc_handler(struct ufs_hba *hba); +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba); static void ufshcd_async_scan(void *data, async_cookie_t cookie); static int ufshcd_reset_and_restore(struct ufs_hba *hba); static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd); static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag); static void ufshcd_hba_exit(struct ufs_hba *hba); -static int ufshcd_probe_hba(struct ufs_hba *hba); +static int ufshcd_probe_hba(struct ufs_hba *hba, bool async); static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on, bool skip_ref_clk); static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on); -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba); static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba); static int ufshcd_host_reset_and_restore(struct ufs_hba *hba); @@ -263,26 +265,18 @@ static inline bool ufshcd_valid_tag(struct ufs_hba *hba, int tag) return tag >= 0 && tag < hba->nutrs; } -static inline int ufshcd_enable_irq(struct ufs_hba *hba) +static inline void ufshcd_enable_irq(struct ufs_hba *hba) { - int ret = 0; - if (!hba->is_irq_enabled) { - ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, - hba); - if (ret) - dev_err(hba->dev, "%s: request_irq failed, ret=%d\n", - __func__, ret); + enable_irq(hba->irq); hba->is_irq_enabled = true; } - - return ret; } static inline void ufshcd_disable_irq(struct ufs_hba *hba) { if (hba->is_irq_enabled) { - free_irq(hba->irq, hba); + disable_irq(hba->irq); hba->is_irq_enabled = false; } } @@ -299,16 +293,6 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba) scsi_block_requests(hba->host); } -/* replace non-printable or non-ASCII characters with spaces */ -static inline void ufshcd_remove_non_printable(char *val) -{ - if (!val) - return; - - if (*val < 0x20 || *val > 0x7e) - *val = ' '; -} - static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag, const char *str) { @@ -342,27 +326,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba, u8 opcode = 0; u32 intr, doorbell; struct ufshcd_lrb *lrbp = &hba->lrb[tag]; + struct scsi_cmnd *cmd = lrbp->cmd; int transfer_len = -1; if (!trace_ufshcd_command_enabled()) { /* trace UPIU W/O tracing command */ - if (lrbp->cmd) + if (cmd) ufshcd_add_cmd_upiu_trace(hba, tag, str); return; } - if (lrbp->cmd) { /* data phase exists */ + if (cmd) { /* data phase exists */ /* trace UPIU also */ ufshcd_add_cmd_upiu_trace(hba, tag, str); - opcode = (u8)(*lrbp->cmd->cmnd); + opcode = cmd->cmnd[0]; if ((opcode == READ_10) || (opcode == WRITE_10)) { /* * Currently we only fully trace read(10) and write(10) * commands */ - if (lrbp->cmd->request && lrbp->cmd->request->bio) - lba = - lrbp->cmd->request->bio->bi_iter.bi_sector; + if (cmd->request && cmd->request->bio) + lba = cmd->request->bio->bi_iter.bi_sector; transfer_len = be32_to_cpu( 
lrbp->ucd_req_ptr->sc.exp_data_transfer_len); } @@ -390,24 +374,25 @@ static void ufshcd_print_clk_freqs(struct ufs_hba *hba) } } -static void ufshcd_print_uic_err_hist(struct ufs_hba *hba, - struct ufs_uic_err_reg_hist *err_hist, char *err_name) +static void ufshcd_print_err_hist(struct ufs_hba *hba, + struct ufs_err_reg_hist *err_hist, + char *err_name) { int i; bool found = false; - for (i = 0; i < UIC_ERR_REG_HIST_LENGTH; i++) { - int p = (i + err_hist->pos) % UIC_ERR_REG_HIST_LENGTH; + for (i = 0; i < UFS_ERR_REG_HIST_LENGTH; i++) { + int p = (i + err_hist->pos) % UFS_ERR_REG_HIST_LENGTH; - if (err_hist->reg[p] == 0) + if (err_hist->tstamp[p] == 0) continue; - dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, i, + dev_err(hba->dev, "%s[%d] = 0x%x at %lld us\n", err_name, p, err_hist->reg[p], ktime_to_us(err_hist->tstamp[p])); found = true; } if (!found) - dev_err(hba->dev, "No record of %s uic errors\n", err_name); + dev_err(hba->dev, "No record of %s\n", err_name); } static void ufshcd_print_host_regs(struct ufs_hba *hba) @@ -423,16 +408,26 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba) ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp), hba->ufs_stats.hibern8_exit_cnt); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err"); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err"); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err"); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err"); - ufshcd_print_uic_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.nl_err, "nl_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.tl_err, "tl_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.dme_err, "dme_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.auto_hibern8_err, + "auto_hibern8_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.fatal_err, "fatal_err"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.link_startup_err, + "link_startup_fail"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.resume_err, "resume_fail"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.suspend_err, + "suspend_fail"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.dev_reset, "dev_reset"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset"); + ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort"); ufshcd_print_clk_freqs(hba); - if (hba->vops && hba->vops->dbg_register_dump) - hba->vops->dbg_register_dump(hba); + ufshcd_vops_dbg_register_dump(hba); } static @@ -492,8 +487,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap) static void ufshcd_print_host_state(struct ufs_hba *hba) { dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state); - dev_err(hba->dev, "lrb in use=0x%lx, outstanding reqs=0x%lx tasks=0x%lx\n", - hba->lrb_in_use, hba->outstanding_reqs, hba->outstanding_tasks); + dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n", + hba->outstanding_reqs, hba->outstanding_tasks); dev_err(hba->dev, "saved_err=0x%x, saved_uic_err=0x%x\n", hba->saved_err, hba->saved_uic_err); dev_err(hba->dev, "Device power mode=%d, UIC link state=%d\n", @@ -641,40 +636,6 @@ static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) } /** - * ufshcd_get_tm_free_slot - get a free slot for task management request - * @hba: per adapter instance - * @free_slot: pointer to variable with available slot value - * - * Get a free tag 
and lock it until ufshcd_put_tm_slot() is called. - * Returns 0 if free slot is not available, else return 1 with tag value - * in @free_slot. - */ -static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot) -{ - int tag; - bool ret = false; - - if (!free_slot) - goto out; - - do { - tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs); - if (tag >= hba->nutmrs) - goto out; - } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use)); - - *free_slot = tag; - ret = true; -out: - return ret; -} - -static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot) -{ - clear_bit_unlock(slot, &hba->tm_slots_in_use); -} - -/** * ufshcd_utrl_clear - Clear a bit in UTRLCLR register * @hba: per adapter instance * @pos: position of the bit to be cleared @@ -1268,6 +1229,24 @@ out: return ret; } +static bool ufshcd_is_busy(struct request *req, void *priv, bool reserved) +{ + int *busy = priv; + + WARN_ON_ONCE(reserved); + (*busy)++; + return false; +} + +/* Whether or not any tag is in use by a request that is in progress. */ +static bool ufshcd_any_tag_in_use(struct ufs_hba *hba) +{ + struct request_queue *q = hba->cmd_queue; + int busy = 0; + + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_is_busy, &busy); + return busy; +} static int ufshcd_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *stat) @@ -1485,6 +1464,8 @@ static void ufshcd_ungate_work(struct work_struct *work) spin_unlock_irqrestore(hba->host->host_lock, flags); ufshcd_setup_clocks(hba, true); + ufshcd_enable_irq(hba); + /* Exit from hibern8 */ if (ufshcd_can_hibern8_during_gating(hba)) { /* Prevent gating in this path */ @@ -1605,7 +1586,7 @@ static void ufshcd_gate_work(struct work_struct *work) * state to CLKS_ON. */ if (hba->clk_gating.is_suspended || - (hba->clk_gating.state == REQ_CLKS_ON)) { + (hba->clk_gating.state != REQ_CLKS_OFF)) { hba->clk_gating.state = CLKS_ON; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); @@ -1614,7 +1595,7 @@ static void ufshcd_gate_work(struct work_struct *work) if (hba->clk_gating.active_reqs || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || hba->lrb_in_use || hba->outstanding_tasks + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done) goto rel_lock; @@ -1631,6 +1612,8 @@ static void ufshcd_gate_work(struct work_struct *work) ufshcd_set_link_hibern8(hba); } + ufshcd_disable_irq(hba); + if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); else @@ -1668,7 +1651,7 @@ static void __ufshcd_release(struct ufs_hba *hba) if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL - || hba->lrb_in_use || hba->outstanding_tasks + || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks || hba->active_uic_cmd || hba->uic_async_done || ufshcd_eh_in_progress(hba)) return; @@ -1876,12 +1859,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) { hba->lrb[task_tag].issue_time_stamp = ktime_get(); hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0); + ufshcd_add_command_trace(hba, task_tag, "send"); ufshcd_clk_scaling_start_busy(hba); __set_bit(task_tag, &hba->outstanding_reqs); ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); /* Make sure that doorbell is committed immediately */ wmb(); - ufshcd_add_command_trace(hba, task_tag, "send"); } /** @@ -1933,8 +1916,8 @@ int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) memcpy(hba->dev_cmd.query.descriptor, descp, 
resp_len); } else { dev_warn(hba->dev, - "%s: Response size is bigger than buffer", - __func__); + "%s: rsp size %d is bigger than buffer size %d", + __func__, resp_len, buf_len); return -EINVAL; } } @@ -2234,6 +2217,7 @@ static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp, static void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) { + struct scsi_cmnd *cmd = lrbp->cmd; struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr; unsigned short cdb_len; @@ -2247,12 +2231,11 @@ void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags) /* Total EHS length and Data segment length will be zero */ ucd_req_ptr->header.dword_2 = 0; - ucd_req_ptr->sc.exp_data_transfer_len = - cpu_to_be32(lrbp->cmd->sdb.length); + ucd_req_ptr->sc.exp_data_transfer_len = cpu_to_be32(cmd->sdb.length); - cdb_len = min_t(unsigned short, lrbp->cmd->cmd_len, UFS_CDB_SIZE); + cdb_len = min_t(unsigned short, cmd->cmd_len, UFS_CDB_SIZE); memset(ucd_req_ptr->sc.cdb, 0, UFS_CDB_SIZE); - memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd, cdb_len); + memcpy(ucd_req_ptr->sc.cdb, cmd->cmnd, cdb_len); memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); } @@ -2438,22 +2421,9 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) hba->req_abort_count = 0; - /* acquire the tag to make sure device cmds don't use it */ - if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) { - /* - * Dev manage command in progress, requeue the command. - * Requeuing the command helps in cases where the request *may* - * find different tag instead of waiting for dev manage command - * completion. - */ - err = SCSI_MLQUEUE_HOST_BUSY; - goto out; - } - err = ufshcd_hold(hba, true); if (err) { err = SCSI_MLQUEUE_HOST_BUSY; - clear_bit_unlock(tag, &hba->lrb_in_use); goto out; } WARN_ON(hba->clk_gating.state != CLKS_ON); @@ -2474,7 +2444,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) err = ufshcd_map_sg(hba, lrbp); if (err) { lrbp->cmd = NULL; - clear_bit_unlock(tag, &hba->lrb_in_use); + ufshcd_release(hba); goto out; } /* Make sure descriptors are ready before ringing the doorbell */ @@ -2622,44 +2592,6 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, } /** - * ufshcd_get_dev_cmd_tag - Get device management command tag - * @hba: per-adapter instance - * @tag_out: pointer to variable with available slot value - * - * Get a free slot and lock it until device management command - * completes. - * - * Returns false if free slot is unavailable for locking, else - * return true with tag value in @tag. - */ -static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out) -{ - int tag; - bool ret = false; - unsigned long tmp; - - if (!tag_out) - goto out; - - do { - tmp = ~hba->lrb_in_use; - tag = find_last_bit(&tmp, hba->nutrs); - if (tag >= hba->nutrs) - goto out; - } while (test_and_set_bit_lock(tag, &hba->lrb_in_use)); - - *tag_out = tag; - ret = true; -out: - return ret; -} - -static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) -{ - clear_bit_unlock(tag, &hba->lrb_in_use); -} - -/** * ufshcd_exec_dev_cmd - API for sending device management requests * @hba: UFS hba * @cmd_type: specifies the type (NOP, Query...) 
@@ -2671,6 +2603,8 @@ static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag) static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, enum dev_cmd_type cmd_type, int timeout) { + struct request_queue *q = hba->cmd_queue; + struct request *req; struct ufshcd_lrb *lrbp; int err; int tag; @@ -2684,7 +2618,13 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, * Even though we use wait_event() which sleeps indefinitely, * the maximum wait time is bounded by SCSI request timeout. */ - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_unlock; + } + tag = req->tag; + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); init_completion(&wait); lrbp = &hba->lrb[tag]; @@ -2709,8 +2649,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, err ? "query_complete_err" : "query_complete"); out_put_tag: - ufshcd_put_dev_cmd_tag(hba, tag); - wake_up(&hba->dev_cmd.tag_wq); + blk_put_request(req); +out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -2913,7 +2853,7 @@ static int ufshcd_query_attr_retry(struct ufs_hba *hba, int ret = 0; u32 retries; - for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { + for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) { ret = ufshcd_query_attr(hba, opcode, idn, index, selector, attr_val); if (ret) @@ -2984,10 +2924,10 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba, goto out_unlock; } - hba->dev_cmd.query.descriptor = NULL; *buf_len = be16_to_cpu(response->upiu_res.length); out_unlock: + hba->dev_cmd.query.descriptor = NULL; mutex_unlock(&hba->dev_cmd.lock); out: ufshcd_release(hba); @@ -3199,67 +3139,85 @@ out: static inline int ufshcd_read_desc(struct ufs_hba *hba, enum desc_idn desc_id, int desc_index, - u8 *buf, + void *buf, u32 size) { return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size); } -static inline int ufshcd_read_power_desc(struct ufs_hba *hba, - u8 *buf, - u32 size) -{ - return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size); -} -static int ufshcd_read_device_desc(struct ufs_hba *hba, u8 *buf, u32 size) +/** + * struct uc_string_id - unicode string + * + * @len: size of this descriptor inclusive + * @type: descriptor type + * @uc: unicode string character + */ +struct uc_string_id { + u8 len; + u8 type; + wchar_t uc[0]; +} __packed; + +/* replace non-printable or non-ASCII characters with spaces */ +static inline char ufshcd_remove_non_printable(u8 ch) { - return ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, buf, size); + return (ch >= 0x20 && ch <= 0x7e) ? ch : ' '; } /** * ufshcd_read_string_desc - read string descriptor * @hba: pointer to adapter instance * @desc_index: descriptor index - * @buf: pointer to buffer where descriptor would be read - * @size: size of buf + * @buf: pointer to buffer where descriptor would be read, + * the caller should free the memory. * @ascii: if true convert from unicode to ascii characters + * null terminated string. * - * Return 0 in case of success, non-zero otherwise + * Return: + * * string size on success. 
+ * * -ENOMEM: on allocation failure + * * -EINVAL: on a wrong parameter */ -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii) +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + u8 **buf, bool ascii) { - int err = 0; + struct uc_string_id *uc_str; + u8 *str; + int ret; - err = ufshcd_read_desc(hba, - QUERY_DESC_IDN_STRING, desc_index, buf, size); + if (!buf) + return -EINVAL; - if (err) { - dev_err(hba->dev, "%s: reading String Desc failed after %d retries. err = %d\n", - __func__, QUERY_REQ_RETRIES, err); + uc_str = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + if (!uc_str) + return -ENOMEM; + + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_STRING, + desc_index, uc_str, + QUERY_DESC_MAX_SIZE); + if (ret < 0) { + dev_err(hba->dev, "Reading String Desc failed after %d retries. err = %d\n", + QUERY_REQ_RETRIES, ret); + str = NULL; + goto out; + } + + if (uc_str->len <= QUERY_DESC_HDR_SIZE) { + dev_dbg(hba->dev, "String Desc is of zero length\n"); + str = NULL; + ret = 0; goto out; } if (ascii) { - int desc_len; - int ascii_len; + ssize_t ascii_len; int i; - char *buff_ascii; - - desc_len = buf[0]; /* remove header and divide by 2 to move from UTF16 to UTF8 */ - ascii_len = (desc_len - QUERY_DESC_HDR_SIZE) / 2 + 1; - if (size < ascii_len + QUERY_DESC_HDR_SIZE) { - dev_err(hba->dev, "%s: buffer allocated size is too small\n", - __func__); - err = -ENOMEM; - goto out; - } - - buff_ascii = kmalloc(ascii_len, GFP_KERNEL); - if (!buff_ascii) { - err = -ENOMEM; + ascii_len = (uc_str->len - QUERY_DESC_HDR_SIZE) / 2 + 1; + str = kzalloc(ascii_len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; goto out; } @@ -3267,22 +3225,28 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, * the descriptor contains string in UTF16 format * we need to convert to utf-8 so it can be displayed */ - utf16s_to_utf8s((wchar_t *)&buf[QUERY_DESC_HDR_SIZE], - desc_len - QUERY_DESC_HDR_SIZE, - UTF16_BIG_ENDIAN, buff_ascii, ascii_len); + ret = utf16s_to_utf8s(uc_str->uc, + uc_str->len - QUERY_DESC_HDR_SIZE, + UTF16_BIG_ENDIAN, str, ascii_len); /* replace non-printable or non-ASCII characters with spaces */ - for (i = 0; i < ascii_len; i++) - ufshcd_remove_non_printable(&buff_ascii[i]); + for (i = 0; i < ret; i++) + str[i] = ufshcd_remove_non_printable(str[i]); + + str[ret++] = '\0'; - memset(buf + QUERY_DESC_HDR_SIZE, 0, - size - QUERY_DESC_HDR_SIZE); - memcpy(buf + QUERY_DESC_HDR_SIZE, buff_ascii, ascii_len); - buf[QUERY_DESC_LENGTH_OFFSET] = ascii_len + QUERY_DESC_HDR_SIZE; - kfree(buff_ascii); + } else { + str = kmemdup(uc_str, uc_str->len, GFP_KERNEL); + if (!str) { + ret = -ENOMEM; + goto out; + } + ret = uc_str->len; } out: - return err; + *buf = str; + kfree(uc_str); + return ret; } /** @@ -3305,7 +3269,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba, * Unit descriptors are only available for general purpose LUs (LUN id * from 0 to 7) and RPMB Well known LU. 
*/ - if (!ufs_is_valid_unit_desc_lun(lun)) + if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) return -EOPNOTSUPP; return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun, @@ -3819,6 +3783,9 @@ static int ufshcd_link_recovery(struct ufs_hba *hba) ufshcd_set_eh_in_progress(hba); spin_unlock_irqrestore(hba->host->host_lock, flags); + /* Reset the attached device */ + ufshcd_vops_device_reset(hba); + ret = ufshcd_host_reset_and_restore(hba); spin_lock_irqsave(hba->host->host_lock, flags); @@ -3848,15 +3815,24 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba) ktime_to_us(ktime_sub(ktime_get(), start)), ret); if (ret) { + int err; + dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n", __func__, ret); /* - * If link recovery fails then return error so that caller - * don't retry the hibern8 enter again. + * If link recovery fails then return the error code returned + * by ufshcd_link_recovery(). + * If link recovery succeeds then return -EAGAIN so that + * hibern8 enter can be retried. */ - if (ufshcd_link_recovery(hba)) - ret = -ENOLINK; + err = ufshcd_link_recovery(hba); + if (err) { + dev_err(hba->dev, "%s: link recovery failed\n", __func__); + ret = err; + } else { + ret = -EAGAIN; + } } else ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER, POST_CHANGE); @@ -3870,14 +3846,14 @@ static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba) for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) { ret = __ufshcd_uic_hibern8_enter(hba); - if (!ret || ret == -ENOLINK) + if (!ret) goto out; } out: return ret; } -static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) { struct uic_command uic_cmd = {0}; int ret; @@ -3903,8 +3879,27 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba) return ret; } +EXPORT_SYMBOL_GPL(ufshcd_uic_hibern8_exit); -static void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit) +{ + unsigned long flags; + + if (!(hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT)) + return; + + spin_lock_irqsave(hba->host->host_lock, flags); + if (hba->ahit == ahit) + goto out_unlock; + hba->ahit = ahit; + if (!pm_runtime_suspended(hba->dev)) + ufshcd_writel(hba, hba->ahit, REG_AUTO_HIBERNATE_IDLE_TIMER); +out_unlock: + spin_unlock_irqrestore(hba->host->host_lock, flags); +} +EXPORT_SYMBOL_GPL(ufshcd_auto_hibern8_update); + +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba) { unsigned long flags; @@ -4043,6 +4038,26 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba, ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), pwr_mode->hs_rate); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), + DL_AFC0ReqTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3), + DL_FC1ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4), + DL_TC1ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5), + DL_AFC1ReqTimeOutVal_Default); + + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal), + DL_FC0ProtectionTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal), + DL_TC0ReplayTimeOutVal_Default); + ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal), + DL_AFC0ReqTimeOutVal_Default); + ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4 
| pwr_mode->pwr_tx); @@ -4136,7 +4151,7 @@ out: * * Returns 0 on success, non-zero value on failure */ -static int ufshcd_make_hba_operational(struct ufs_hba *hba) +int ufshcd_make_hba_operational(struct ufs_hba *hba) { int err = 0; u32 reg; @@ -4182,6 +4197,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) out: return err; } +EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational); /** * ufshcd_hba_stop - Send controller to reset state @@ -4214,12 +4230,6 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) { int retry; - /* - * msleep of 1 and 5 used in this function might result in msleep(20), - * but it was necessary to send the UFS FPGA to reset mode during - * development and testing of this driver. msleep can be changed to - * mdelay and retry count can be reduced based on the controller. - */ if (!ufshcd_is_hba_active(hba)) /* change controller state to "reset state" */ ufshcd_hba_stop(hba, true); @@ -4242,7 +4252,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) * instruction might be read back. * This delay can be changed based on the controller. */ - msleep(1); + usleep_range(1000, 1100); /* wait for the host controller to complete initialization */ retry = 10; @@ -4254,7 +4264,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) "Controller enable failed\n"); return -EIO; } - msleep(5); + usleep_range(5000, 5100); } /* enable UIC related interrupts */ @@ -4265,7 +4275,7 @@ static int ufshcd_hba_execute_hce(struct ufs_hba *hba) return 0; } -static int ufshcd_hba_enable(struct ufs_hba *hba) +int ufshcd_hba_enable(struct ufs_hba *hba) { int ret; @@ -4290,6 +4300,8 @@ static int ufshcd_hba_enable(struct ufs_hba *hba) return ret; } +EXPORT_SYMBOL_GPL(ufshcd_hba_enable); + static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer) { int tx_lanes, i, err = 0; @@ -4326,6 +4338,15 @@ static inline int ufshcd_disable_device_tx_lcc(struct ufs_hba *hba) return ufshcd_disable_tx_lcc(hba, true); } +void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, + u32 reg) +{ + reg_hist->reg[reg_hist->pos] = reg; + reg_hist->tstamp[reg_hist->pos] = ktime_get(); + reg_hist->pos = (reg_hist->pos + 1) % UFS_ERR_REG_HIST_LENGTH; +} +EXPORT_SYMBOL_GPL(ufshcd_update_reg_hist); + /** * ufshcd_link_startup - Initialize unipro link startup * @hba: per adapter instance @@ -4353,6 +4374,8 @@ link_startup: /* check if device is detected by inter-connect layer */ if (!ret && !ufshcd_is_device_present(hba)) { + ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, + 0); dev_err(hba->dev, "%s: Device not present\n", __func__); ret = -ENXIO; goto out; @@ -4363,13 +4386,19 @@ link_startup: * but we can't be sure if the link is up until link startup * succeeds. So reset the local Uni-Pro and try again. */ - if (ret && ufshcd_hba_enable(hba)) + if (ret && ufshcd_hba_enable(hba)) { + ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, + (u32)ret); goto out; + } } while (ret && retries--); - if (ret) + if (ret) { /* failed to get the link up... retire */ + ufshcd_update_reg_hist(&hba->ufs_stats.link_startup_err, + (u32)ret); goto out; + } if (link_startup_again) { link_startup_again = false; @@ -4499,7 +4528,7 @@ static int ufshcd_get_lu_wp(struct ufs_hba *hba, * protected so skip reading bLUWriteProtect parameter for * it. For other W-LUs, UNIT DESCRIPTOR is not available. 
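The new ufshcd_update_reg_hist() above keeps the last eight register values in a cyclic buffer, so recent error codes survive in order for later debugging. A minimal, self-contained sketch of the same technique follows; this is illustrative userspace C with made-up names, not the kernel code:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define HIST_LEN 8			/* mirrors UFS_ERR_REG_HIST_LENGTH */

struct reg_hist {
	int pos;			/* next slot to overwrite */
	uint32_t reg[HIST_LEN];		/* recorded register values */
	uint64_t tstamp[HIST_LEN];	/* matching timestamps (ns) */
};

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

static void hist_update(struct reg_hist *h, uint32_t reg)
{
	h->reg[h->pos] = reg;
	h->tstamp[h->pos] = now_ns();
	h->pos = (h->pos + 1) % HIST_LEN;	/* wrap: oldest entry is overwritten */
}

int main(void)
{
	struct reg_hist h = { 0 };
	int i;

	for (i = 0; i < 12; i++)	/* 12 events, only the last 8 survive */
		hist_update(&h, 0x80000000u | i);
	for (i = 0; i < HIST_LEN; i++)
		printf("slot %d: reg=0x%08x t=%llu\n", i,
		       (unsigned)h.reg[i], (unsigned long long)h.tstamp[i]);
	return 0;
}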
*/ - else if (lun >= UFS_UPIU_MAX_GENERAL_LUN) + else if (lun >= hba->dev_info.max_lu_supported) ret = -ENOTSUPP; else ret = ufshcd_read_unit_desc_param(hba, @@ -4546,6 +4575,9 @@ static int ufshcd_slave_alloc(struct scsi_device *sdev) /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */ sdev->use_10_for_ms = 1; + /* DBD field should be set to 1 in mode sense(10) */ + sdev->set_dbd_for_ms = 1; + /* allow SCSI layer to restart the device in case of errors */ sdev->allow_restart = 1; @@ -4584,9 +4616,14 @@ static int ufshcd_change_queue_depth(struct scsi_device *sdev, int depth) */ static int ufshcd_slave_configure(struct scsi_device *sdev) { + struct ufs_hba *hba = shost_priv(sdev->host); struct request_queue *q = sdev->request_queue; blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1); + + if (ufshcd_is_rpm_autosuspend_allowed(hba)) + sdev->rpm_autosuspend = 1; + return 0; } @@ -4732,7 +4769,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } /* end of switch */ - if (host_byte(result) != DID_OK) + if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); return result; } @@ -4741,19 +4778,29 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) * ufshcd_uic_cmd_compl - handle completion of uic command * @hba: per adapter instance * @intr_status: interrupt status generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) +static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) { + irqreturn_t retval = IRQ_NONE; + if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { hba->active_uic_cmd->argument2 |= ufshcd_get_uic_cmd_result(hba); hba->active_uic_cmd->argument3 = ufshcd_get_dme_attr_val(hba); complete(&hba->active_uic_cmd->done); + retval = IRQ_HANDLED; } - if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) + if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done) { complete(hba->uic_async_done); + retval = IRQ_HANDLED; + } + return retval; } /** @@ -4779,12 +4826,13 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, cmd->result = result; /* Mark completed command as NULL in LRB */ lrbp->cmd = NULL; - clear_bit_unlock(index, &hba->lrb_in_use); + lrbp->compl_time_stamp = ktime_get(); /* Do not touch lrbp after scsi done */ cmd->scsi_done(cmd); __ufshcd_release(hba); } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE || lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) { + lrbp->compl_time_stamp = ktime_get(); if (hba->dev_cmd.complete) { ufshcd_add_command_trace(hba, index, "dev_complete"); @@ -4793,24 +4841,23 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba, } if (ufshcd_is_clkscaling_supported(hba)) hba->clk_scaling.active_reqs--; - - lrbp->compl_time_stamp = ktime_get(); } /* clear corresponding bits of completed commands */ hba->outstanding_reqs ^= completed_reqs; ufshcd_clk_scaling_update_busy(hba); - - /* we might have free'd some tags above */ - wake_up(&hba->dev_cmd.tag_wq); } /** * ufshcd_transfer_req_compl - handle SCSI and query command completion * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_transfer_req_compl(struct ufs_hba *hba) +static irqreturn_t ufshcd_transfer_req_compl(struct ufs_hba *hba) { unsigned long completed_reqs; u32 
tr_doorbell; @@ -4829,7 +4876,12 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); completed_reqs = tr_doorbell ^ hba->outstanding_reqs; - __ufshcd_transfer_req_compl(hba, completed_reqs); + if (completed_reqs) { + __ufshcd_transfer_req_compl(hba, completed_reqs); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } } /** @@ -4967,6 +5019,7 @@ static int ufshcd_disable_auto_bkops(struct ufs_hba *hba) hba->auto_bkops_enabled = false; trace_ufshcd_auto_bkops_state(dev_name(hba->dev), "Disabled"); + hba->is_urgent_bkops_lvl_checked = false; out: return err; } @@ -4991,6 +5044,7 @@ static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba) hba->ee_ctrl_mask &= ~MASK_EE_URGENT_BKOPS; ufshcd_disable_auto_bkops(hba); } + hba->is_urgent_bkops_lvl_checked = false; } static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status) @@ -5037,6 +5091,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba, err = ufshcd_enable_auto_bkops(hba); else err = ufshcd_disable_auto_bkops(hba); + hba->urgent_bkops_lvl = curr_status; out: return err; } @@ -5114,7 +5169,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) hba = container_of(work, struct ufs_hba, eeh_work); pm_runtime_get_sync(hba->dev); - scsi_block_requests(hba->host); + ufshcd_scsi_block_requests(hba); err = ufshcd_get_ee_status(hba, &status); if (err) { dev_err(hba->dev, "%s: failed to get exception status %d\n", @@ -5128,7 +5183,7 @@ static void ufshcd_exception_event_handler(struct work_struct *work) ufshcd_bkops_exception_event_handler(hba); out: - scsi_unblock_requests(hba->host); + ufshcd_scsi_unblock_requests(hba); pm_runtime_put_sync(hba->dev); return; } @@ -5262,8 +5317,8 @@ static void ufshcd_err_handler(struct work_struct *work) /* * if host reset is required then skip clearing the pending - * transfers forcefully because they will automatically get - * cleared after link startup. + * transfers forcefully because they will get cleared during + * host reset and restore */ if (needs_reset) goto skip_pending_xfer_clear; @@ -5345,72 +5400,80 @@ out: pm_runtime_put_sync(hba->dev); } -static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist, - u32 reg) -{ - reg_hist->reg[reg_hist->pos] = reg; - reg_hist->tstamp[reg_hist->pos] = ktime_get(); - reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH; -} - /** * ufshcd_update_uic_error - check and set fatal UIC error flags. * @hba: per-adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_update_uic_error(struct ufs_hba *hba) +static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba) { u32 reg; + irqreturn_t retval = IRQ_NONE; /* PHY layer lane error */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER); /* Ignore LINERESET indication, as this is not an error */ if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) && - (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) { + (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) { /* * To know whether this error is fatal or not, DB timeout * must be checked but this error is handled separately. 
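ufshcd_transfer_req_compl() above derives the set of finished commands without per-command bookkeeping: bits still set in the doorbell register belong to the hardware, so XOR-ing a doorbell snapshot with the driver's outstanding-request bitmap yields exactly the completed tags, and a second XOR retires them. A stand-alone illustration of the bit arithmetic (not the driver code):

#include <stdio.h>

int main(void)
{
	unsigned long outstanding = 0x2f;	/* tags 0,1,2,3,5 were issued */
	unsigned long doorbell = 0x22;		/* tags 1 and 5 still in hardware */
	unsigned long completed = doorbell ^ outstanding;

	printf("completed:   0x%02lx\n", completed);	/* 0x0d -> tags 0,2,3 */
	outstanding ^= completed;			/* retire completed tags */
	printf("outstanding: 0x%02lx\n", outstanding);	/* 0x22 */
	return 0;
}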
*/ dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__); - ufshcd_update_uic_reg_hist(&hba->ufs_stats.pa_err, reg); + ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg); + retval |= IRQ_HANDLED; } /* PA_INIT_ERROR is fatal and needs UIC reset */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER); - if (reg) - ufshcd_update_uic_reg_hist(&hba->ufs_stats.dl_err, reg); - - if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) - hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; - else if (hba->dev_quirks & - UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { - if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) - hba->uic_error |= - UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; - else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) - hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + if ((reg & UIC_DATA_LINK_LAYER_ERROR) && + (reg & UIC_DATA_LINK_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_reg_hist(&hba->ufs_stats.dl_err, reg); + + if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT) + hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR; + else if (hba->dev_quirks & + UFS_DEVICE_QUIRK_RECOVERY_FROM_DL_NAC_ERRORS) { + if (reg & UIC_DATA_LINK_LAYER_ERROR_NAC_RECEIVED) + hba->uic_error |= + UFSHCD_UIC_DL_NAC_RECEIVED_ERROR; + else if (reg & UIC_DATA_LINK_LAYER_ERROR_TCx_REPLAY_TIMEOUT) + hba->uic_error |= UFSHCD_UIC_DL_TCx_REPLAY_ERROR; + } + retval |= IRQ_HANDLED; } /* UIC NL/TL/DME errors needs software retry */ reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER); - if (reg) { - ufshcd_update_uic_reg_hist(&hba->ufs_stats.nl_err, reg); + if ((reg & UIC_NETWORK_LAYER_ERROR) && + (reg & UIC_NETWORK_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_reg_hist(&hba->ufs_stats.nl_err, reg); hba->uic_error |= UFSHCD_UIC_NL_ERROR; + retval |= IRQ_HANDLED; } reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER); - if (reg) { - ufshcd_update_uic_reg_hist(&hba->ufs_stats.tl_err, reg); + if ((reg & UIC_TRANSPORT_LAYER_ERROR) && + (reg & UIC_TRANSPORT_LAYER_ERROR_CODE_MASK)) { + ufshcd_update_reg_hist(&hba->ufs_stats.tl_err, reg); hba->uic_error |= UFSHCD_UIC_TL_ERROR; + retval |= IRQ_HANDLED; } reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME); - if (reg) { - ufshcd_update_uic_reg_hist(&hba->ufs_stats.dme_err, reg); + if ((reg & UIC_DME_ERROR) && + (reg & UIC_DME_ERROR_CODE_MASK)) { + ufshcd_update_reg_hist(&hba->ufs_stats.dme_err, reg); hba->uic_error |= UFSHCD_UIC_DME_ERROR; + retval |= IRQ_HANDLED; } dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n", __func__, hba->uic_error); + return retval; } static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, @@ -5433,17 +5496,24 @@ static bool ufshcd_is_auto_hibern8_error(struct ufs_hba *hba, /** * ufshcd_check_errors - Check for errors that need s/w attention * @hba: per-adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_check_errors(struct ufs_hba *hba) +static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba) { bool queue_eh_work = false; + irqreturn_t retval = IRQ_NONE; - if (hba->errors & INT_FATAL_ERRORS) + if (hba->errors & INT_FATAL_ERRORS) { + ufshcd_update_reg_hist(&hba->ufs_stats.fatal_err, hba->errors); queue_eh_work = true; + } if (hba->errors & UIC_ERROR) { hba->uic_error = 0; - ufshcd_update_uic_error(hba); + retval = ufshcd_update_uic_error(hba); if (hba->uic_error) queue_eh_work = true; } @@ -5454,6 +5524,8 @@ static void ufshcd_check_errors(struct ufs_hba *hba) __func__, (hba->errors & UIC_HIBERNATE_ENTER) ? 
"Enter" : "Exit", hba->errors, ufshcd_get_upmcrs(hba)); + ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err, + hba->errors); queue_eh_work = true; } @@ -5489,6 +5561,7 @@ static void ufshcd_check_errors(struct ufs_hba *hba) } schedule_work(&hba->eh_work); } + retval |= IRQ_HANDLED; } /* * if (!queue_eh_work) - @@ -5496,44 +5569,81 @@ static void ufshcd_check_errors(struct ufs_hba *hba) * itself without s/w intervention or errors that will be * handled by the SCSI core layer. */ + return retval; +} + +struct ctm_info { + struct ufs_hba *hba; + unsigned long pending; + unsigned int ncpl; +}; + +static bool ufshcd_compl_tm(struct request *req, void *priv, bool reserved) +{ + struct ctm_info *const ci = priv; + struct completion *c; + + WARN_ON_ONCE(reserved); + if (test_bit(req->tag, &ci->pending)) + return true; + ci->ncpl++; + c = req->end_io_data; + if (c) + complete(c); + return true; } /** * ufshcd_tmc_handler - handle task management function completion * @hba: per adapter instance + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_tmc_handler(struct ufs_hba *hba) +static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) { - u32 tm_doorbell; + struct request_queue *q = hba->tmf_queue; + struct ctm_info ci = { + .hba = hba, + .pending = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL), + }; - tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL); - hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks; - wake_up(&hba->tm_wq); + blk_mq_tagset_busy_iter(q->tag_set, ufshcd_compl_tm, &ci); + return ci.ncpl ? IRQ_HANDLED : IRQ_NONE; } /** * ufshcd_sl_intr - Interrupt service routine * @hba: per adapter instance * @intr_status: contains interrupts generated by the controller + * + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ -static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) +static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) { + irqreturn_t retval = IRQ_NONE; + hba->errors = UFSHCD_ERROR_MASK & intr_status; if (ufshcd_is_auto_hibern8_error(hba, intr_status)) hba->errors |= (UFSHCD_UIC_HIBERN8_MASK & intr_status); if (hba->errors) - ufshcd_check_errors(hba); + retval |= ufshcd_check_errors(hba); if (intr_status & UFSHCD_UIC_MASK) - ufshcd_uic_cmd_compl(hba, intr_status); + retval |= ufshcd_uic_cmd_compl(hba, intr_status); if (intr_status & UTP_TASK_REQ_COMPL) - ufshcd_tmc_handler(hba); + retval |= ufshcd_tmc_handler(hba); if (intr_status & UTP_TRANSFER_REQ_COMPL) - ufshcd_transfer_req_compl(hba); + retval |= ufshcd_transfer_req_compl(hba); + + return retval; } /** @@ -5541,8 +5651,9 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) * @irq: irq number * @__hba: pointer to adapter instance * - * Returns IRQ_HANDLED - If interrupt is valid - * IRQ_NONE - If invalid interrupt + * Returns + * IRQ_HANDLED - If interrupt is valid + * IRQ_NONE - If invalid interrupt */ static irqreturn_t ufshcd_intr(int irq, void *__hba) { @@ -5565,14 +5676,18 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE); if (intr_status) ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS); - if (enabled_intr_status) { - ufshcd_sl_intr(hba, enabled_intr_status); - retval = IRQ_HANDLED; - } + if (enabled_intr_status) + retval |= ufshcd_sl_intr(hba, enabled_intr_status); intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); } while (intr_status && --retries); + if (retval == IRQ_NONE) { + 
dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n", + __func__, intr_status); + ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); + } + spin_unlock(hba->host->host_lock); return retval; } @@ -5601,7 +5716,10 @@ out: static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, struct utp_task_req_desc *treq, u8 tm_function) { + struct request_queue *q = hba->tmf_queue; struct Scsi_Host *host = hba->host; + DECLARE_COMPLETION_ONSTACK(wait); + struct request *req; unsigned long flags; int free_slot, task_tag, err; @@ -5610,7 +5728,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, * Even though we use wait_event() which sleeps indefinitely, * the maximum wait time is bounded by %TM_CMD_TIMEOUT. */ - wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot)); + req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED); + req->end_io_data = &wait; + free_slot = req->tag; + WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs); ufshcd_hold(hba, false); spin_lock_irqsave(host->host_lock, flags); @@ -5636,10 +5757,14 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send"); /* wait until the task management command is completed */ - err = wait_event_timeout(hba->tm_wq, - test_bit(free_slot, &hba->tm_condition), + err = wait_for_completion_io_timeout(&wait, msecs_to_jiffies(TM_CMD_TIMEOUT)); if (!err) { + /* + * Make sure that ufshcd_compl_tm() does not trigger a + * use-after-free. + */ + req->end_io_data = NULL; ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err"); dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n", __func__, tm_function); @@ -5652,16 +5777,13 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq)); ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete"); - - spin_lock_irqsave(hba->host->host_lock, flags); - __clear_bit(free_slot, &hba->outstanding_tasks); - spin_unlock_irqrestore(hba->host->host_lock, flags); - } - clear_bit(free_slot, &hba->tm_condition); - ufshcd_put_tm_slot(hba, free_slot); - wake_up(&hba->tm_tag_wq); + spin_lock_irqsave(hba->host->host_lock, flags); + __clear_bit(free_slot, &hba->outstanding_tasks); + spin_unlock_irqrestore(hba->host->host_lock, flags); + + blk_put_request(req); ufshcd_release(hba); return err; @@ -5718,9 +5840,9 @@ static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id, * @hba: per-adapter instance * @req_upiu: upiu request * @rsp_upiu: upiu reply - * @msgcode: message code, one of UPIU Transaction Codes Initiator to Target * @desc_buff: pointer to descriptor buffer, NULL if NA * @buff_len: descriptor size, 0 if NA + * @cmd_type: specifies the type (NOP, Query...) * @desc_op: descriptor operation * * Those type of requests uses UTP Transfer Request Descriptor - utrd. 
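The hunks above convert each sub-handler to return an irqreturn_t and have ufshcd_intr() OR the results together, so a final IRQ_NONE reliably means no handler consumed anything and the register dump fires only for genuinely spurious interrupts. A self-contained model of that dispatch pattern; the bit names are assumed for illustration, not the real interrupt layout:

#include <stdio.h>

enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1 };

#define UIC_COMPL	(1u << 0)	/* illustrative bit assignments */
#define TM_COMPL	(1u << 1)
#define TR_COMPL	(1u << 2)

static int handle_uic(unsigned int s) { return (s & UIC_COMPL) ? IRQ_HANDLED : IRQ_NONE; }
static int handle_tm(unsigned int s) { return (s & TM_COMPL) ? IRQ_HANDLED : IRQ_NONE; }
static int handle_tr(unsigned int s) { return (s & TR_COMPL) ? IRQ_HANDLED : IRQ_NONE; }

static int sl_intr(unsigned int status)
{
	int ret = IRQ_NONE;

	/* every handler runs; each reports whether it consumed any work */
	ret |= handle_uic(status);
	ret |= handle_tm(status);
	ret |= handle_tr(status);
	return ret;
}

int main(void)
{
	if (sl_intr(1u << 3) == IRQ_NONE)	/* a bit nobody claims */
		printf("unhandled interrupt - would dump host regs here\n");
	return 0;
}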
@@ -5734,9 +5856,11 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, struct utp_upiu_req *rsp_upiu, u8 *desc_buff, int *buff_len, - int cmd_type, + enum dev_cmd_type cmd_type, enum query_opcode desc_op) { + struct request_queue *q = hba->cmd_queue; + struct request *req; struct ufshcd_lrb *lrbp; int err = 0; int tag; @@ -5746,7 +5870,13 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, down_read(&hba->clk_scaling_lock); - wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag)); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) { + err = PTR_ERR(req); + goto out_unlock; + } + tag = req->tag; + WARN_ON_ONCE(!ufshcd_valid_tag(hba, tag)); init_completion(&wait); lrbp = &hba->lrb[tag]; @@ -5814,14 +5944,16 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, memcpy(desc_buff, descp, resp_len); *buff_len = resp_len; } else { - dev_warn(hba->dev, "rsp size is bigger than buffer"); + dev_warn(hba->dev, + "%s: rsp size %d is bigger than buffer size %d", + __func__, resp_len, *buff_len); *buff_len = 0; err = -EINVAL; } } - ufshcd_put_dev_cmd_tag(hba, tag); - wake_up(&hba->dev_cmd.tag_wq); + blk_put_request(req); +out_unlock: up_read(&hba->clk_scaling_lock); return err; } @@ -5849,7 +5981,7 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, enum query_opcode desc_op) { int err; - int cmd_type = DEV_CMD_TYPE_QUERY; + enum dev_cmd_type cmd_type = DEV_CMD_TYPE_QUERY; struct utp_task_req_desc treq = { { 0 }, }; int ocs_value; u8 tm_f = be32_to_cpu(req_upiu->header.dword_1) >> 16 & MASK_TM_FUNC; @@ -5941,6 +6073,7 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) out: hba->req_abort_count = 0; + ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, (u32)err); if (!err) { err = SUCCESS; } else { @@ -6034,6 +6167,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) */ scsi_print_command(hba->lrb[tag].cmd); if (!hba->req_abort_count) { + ufshcd_update_reg_hist(&hba->ufs_stats.task_abort, 0); ufshcd_print_host_regs(hba); ufshcd_print_host_state(hba); ufshcd_print_pwr_info(hba); @@ -6114,9 +6248,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd) hba->lrb[tag].cmd = NULL; spin_unlock_irqrestore(host->host_lock, flags); - clear_bit_unlock(tag, &hba->lrb_in_use); - wake_up(&hba->dev_cmd.tag_wq); - out: if (!err) { err = SUCCESS; @@ -6149,9 +6280,15 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) int err; unsigned long flags; - /* Reset the host controller */ + /* + * Stop the host controller and complete the requests + * cleared by h/w + */ spin_lock_irqsave(hba->host->host_lock, flags); ufshcd_hba_stop(hba, false); + hba->silence_err_logs = true; + ufshcd_complete_requests(hba); + hba->silence_err_logs = false; spin_unlock_irqrestore(hba->host->host_lock, flags); /* scale up clocks to max frequency before full reinitialization */ @@ -6162,14 +6299,14 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) goto out; /* Establish the link again and restore the device */ - err = ufshcd_probe_hba(hba); + err = ufshcd_probe_hba(hba, false); if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)) err = -EIO; out: if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); - + ufshcd_update_reg_hist(&hba->ufs_stats.host_reset, (u32)err); return err; } @@ -6185,22 +6322,15 @@ out: static int ufshcd_reset_and_restore(struct ufs_hba *hba) { int err = 0; - unsigned long flags; int retries = MAX_HOST_RESET_RETRIES; do { + /* Reset the attached device */ + 
ufshcd_vops_device_reset(hba); + err = ufshcd_host_reset_and_restore(hba); } while (err && --retries); - /* - * After reset the door-bell might be cleared, complete - * outstanding requests in s/w here. - */ - spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_transfer_req_compl(hba); - ufshcd_tmc_handler(hba); - spin_unlock_irqrestore(hba->host->host_lock, flags); - return err; } @@ -6355,7 +6485,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba) if (!desc_buf) return; - ret = ufshcd_read_power_desc(hba, desc_buf, buff_len); + ret = ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, + desc_buf, buff_len); if (ret) { dev_err(hba->dev, "%s: Failed reading power descriptor.len = %d ret = %d", @@ -6445,13 +6576,13 @@ out: return ret; } -static int ufs_get_device_desc(struct ufs_hba *hba, - struct ufs_dev_desc *dev_desc) +static int ufs_get_device_desc(struct ufs_hba *hba) { int err; size_t buff_len; u8 model_index; u8 *desc_buf; + struct ufs_dev_info *dev_info = &hba->dev_info; buff_len = max_t(size_t, hba->desc_size.dev_desc, QUERY_DESC_MAX_SIZE + 1); @@ -6461,7 +6592,8 @@ static int ufs_get_device_desc(struct ufs_hba *hba, goto out; } - err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc); + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_DEVICE, 0, desc_buf, + hba->desc_size.dev_desc); if (err) { dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", __func__, err); @@ -6472,45 +6604,48 @@ static int ufs_get_device_desc(struct ufs_hba *hba, * getting vendor (manufacturerID) and Bank Index in big endian * format */ - dev_desc->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | + dev_info->wmanufacturerid = desc_buf[DEVICE_DESC_PARAM_MANF_ID] << 8 | desc_buf[DEVICE_DESC_PARAM_MANF_ID + 1]; model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; - - /* Zero-pad entire buffer for string termination. */ - memset(desc_buf, 0, buff_len); - - err = ufshcd_read_string_desc(hba, model_index, desc_buf, - QUERY_DESC_MAX_SIZE, true/*ASCII*/); - if (err) { + err = ufshcd_read_string_desc(hba, model_index, + &dev_info->model, SD_ASCII_STD); + if (err < 0) { dev_err(hba->dev, "%s: Failed reading Product Name. 
err = %d\n", __func__, err); goto out; } - desc_buf[QUERY_DESC_MAX_SIZE] = '\0'; - strlcpy(dev_desc->model, (desc_buf + QUERY_DESC_HDR_SIZE), - min_t(u8, desc_buf[QUERY_DESC_LENGTH_OFFSET], - MAX_MODEL_LEN)); - - /* Null terminate the model string */ - dev_desc->model[MAX_MODEL_LEN] = '\0'; + /* + * ufshcd_read_string_desc returns size of the string + * reset the error value + */ + err = 0; out: kfree(desc_buf); return err; } -static void ufs_fixup_device_setup(struct ufs_hba *hba, - struct ufs_dev_desc *dev_desc) +static void ufs_put_device_desc(struct ufs_hba *hba) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + + kfree(dev_info->model); + dev_info->model = NULL; +} + +static void ufs_fixup_device_setup(struct ufs_hba *hba) { struct ufs_dev_fix *f; + struct ufs_dev_info *dev_info = &hba->dev_info; for (f = ufs_fixups; f->quirk; f++) { - if ((f->card.wmanufacturerid == dev_desc->wmanufacturerid || - f->card.wmanufacturerid == UFS_ANY_VENDOR) && - (STR_PRFX_EQUAL(f->card.model, dev_desc->model) || - !strcmp(f->card.model, UFS_ANY_MODEL))) + if ((f->wmanufacturerid == dev_info->wmanufacturerid || + f->wmanufacturerid == UFS_ANY_VENDOR) && + ((dev_info->model && + STR_PRFX_EQUAL(f->model, dev_info->model)) || + !strcmp(f->model, UFS_ANY_MODEL))) hba->dev_quirks |= f->quirk; } } @@ -6681,17 +6816,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba) static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) { - int err_reg_hist_size = sizeof(struct ufs_uic_err_reg_hist); - hba->ufs_stats.hibern8_exit_cnt = 0; hba->ufs_stats.last_hibern8_exit_tstamp = ktime_set(0, 0); - - memset(&hba->ufs_stats.pa_err, 0, err_reg_hist_size); - memset(&hba->ufs_stats.dl_err, 0, err_reg_hist_size); - memset(&hba->ufs_stats.nl_err, 0, err_reg_hist_size); - memset(&hba->ufs_stats.tl_err, 0, err_reg_hist_size); - memset(&hba->ufs_stats.dme_err, 0, err_reg_hist_size); - hba->req_abort_count = 0; } @@ -6728,21 +6854,42 @@ static void ufshcd_init_desc_sizes(struct ufs_hba *hba) &hba->desc_size.geom_desc); if (err) hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; + err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_HEALTH, 0, &hba->desc_size.hlth_desc); if (err) hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; } -static void ufshcd_def_desc_sizes(struct ufs_hba *hba) +static int ufshcd_device_geo_params_init(struct ufs_hba *hba) { - hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE; - hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE; - hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE; - hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE; - hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE; - hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE; - hba->desc_size.hlth_desc = QUERY_DESC_HEALTH_DEF_SIZE; + int err; + size_t buff_len; + u8 *desc_buf; + + buff_len = hba->desc_size.geom_desc; + desc_buf = kmalloc(buff_len, GFP_KERNEL); + if (!desc_buf) { + err = -ENOMEM; + goto out; + } + + err = ufshcd_read_desc(hba, QUERY_DESC_IDN_GEOMETRY, 0, + desc_buf, buff_len); + if (err) { + dev_err(hba->dev, "%s: Failed reading Geometry Desc. 
err = %d\n", + __func__, err); + goto out; + } + + if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 1) + hba->dev_info.max_lu_supported = 32; + else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) + hba->dev_info.max_lu_supported = 8; + +out: + kfree(desc_buf); + return err; } static struct ufs_ref_clk ufs_ref_clk_freqs[] = { @@ -6813,15 +6960,92 @@ out: return err; } +static int ufshcd_device_params_init(struct ufs_hba *hba) +{ + bool flag; + int ret; + + /* Clear any previous UFS device information */ + memset(&hba->dev_info, 0, sizeof(hba->dev_info)); + + /* Init check for device descriptor sizes */ + ufshcd_init_desc_sizes(hba); + + /* Init UFS geometry descriptor related parameters */ + ret = ufshcd_device_geo_params_init(hba); + if (ret) + goto out; + + /* Check and apply UFS device quirks */ + ret = ufs_get_device_desc(hba); + if (ret) { + dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", + __func__, ret); + goto out; + } + + ufs_fixup_device_setup(hba); + + if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, + QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) + hba->dev_info.f_power_on_wp_en = flag; + + /* Probe maximum power mode co-supported by both UFS host and device */ + if (ufshcd_get_max_pwr_mode(hba)) + dev_err(hba->dev, + "%s: Failed getting max supported power mode\n", + __func__); +out: + return ret; +} + +/** + * ufshcd_add_lus - probe and add UFS logical units + * @hba: per-adapter instance + */ +static int ufshcd_add_lus(struct ufs_hba *hba) +{ + int ret; + + ufshcd_init_icc_levels(hba); + + /* Add required well known logical units to scsi mid layer */ + ret = ufshcd_scsi_add_wlus(hba); + if (ret) + goto out; + + /* Initialize devfreq after UFS device is detected */ + if (ufshcd_is_clkscaling_supported(hba)) { + memcpy(&hba->clk_scaling.saved_pwr_info.info, + &hba->pwr_info, + sizeof(struct ufs_pa_layer_attr)); + hba->clk_scaling.saved_pwr_info.is_valid = true; + if (!hba->devfreq) { + ret = ufshcd_devfreq_init(hba); + if (ret) + goto out; + } + + hba->clk_scaling.is_allowed = true; + } + + ufs_bsg_probe(hba); + scsi_scan_host(hba->host); + pm_runtime_put_sync(hba->dev); + +out: + return ret; +} + /** * ufshcd_probe_hba - probe hba to detect device and initialize * @hba: per-adapter instance + * @async: asynchronous execution or not * * Execute link-startup and verify device initialization */ -static int ufshcd_probe_hba(struct ufs_hba *hba) +static int ufshcd_probe_hba(struct ufs_hba *hba, bool async) { - struct ufs_dev_desc card = {0}; int ret; ktime_t start = ktime_get(); @@ -6839,28 +7063,26 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* UniPro link is active now */ ufshcd_set_link_active(hba); - /* Enable Auto-Hibernate if configured */ - ufshcd_auto_hibern8_enable(hba); - + /* Verify device initialization by sending NOP OUT UPIU */ ret = ufshcd_verify_dev_init(hba); if (ret) goto out; + /* Initiate UFS initialization, and waiting until completion */ ret = ufshcd_complete_dev_init(hba); if (ret) goto out; - /* Init check for device descriptor sizes */ - ufshcd_init_desc_sizes(hba); - - ret = ufs_get_device_desc(hba, &card); - if (ret) { - dev_err(hba->dev, "%s: Failed getting device info. err = %d\n", - __func__, ret); - goto out; + /* + * Initialize UFS device parameters used by driver, these + * parameters are associated with UFS descriptors. 
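ufshcd_device_geo_params_init() above replaces the old fixed UFS_UPIU_MAX_GENERAL_LUN assumption with the geometry descriptor's maximum-LU field, where a raw value of 0 means the device supports 8 logical units and 1 means 32. A trivial stand-alone decode of the same rule (illustrative, not the driver code):

#include <stdio.h>

/* decode of GEOMETRY_DESC_PARAM_MAX_NUM_LUN as the hunk above reads it */
static int max_lu_supported(unsigned char raw)
{
	if (raw == 1)
		return 32;
	if (raw == 0)
		return 8;
	return -1;	/* any other encoding is unexpected */
}

int main(void)
{
	printf("raw 0 -> %d LUs, raw 1 -> %d LUs\n",
	       max_lu_supported(0), max_lu_supported(1));
	return 0;
}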
+ */ + if (async) { + ret = ufshcd_device_params_init(hba); + if (ret) + goto out; } - ufs_fixup_device_setup(hba, &card); ufshcd_tune_unipro_params(hba); /* UFS device is also active now */ @@ -6868,11 +7090,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) ufshcd_force_reset_auto_bkops(hba); hba->wlun_dev_clr_ua = true; - if (ufshcd_get_max_pwr_mode(hba)) { - dev_err(hba->dev, - "%s: Failed getting max supported power mode\n", - __func__); - } else { + /* Gear up to HS gear if supported */ + if (hba->max_pwr_info.is_valid) { /* * Set the right value to bRefClkFreq before attempting to * switch to HS gears. @@ -6890,59 +7109,10 @@ static int ufshcd_probe_hba(struct ufs_hba *hba) /* set the state as operational after switching to desired gear */ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL; - /* - * If we are in error handling context or in power management callbacks - * context, no need to scan the host - */ - if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { - bool flag; - - /* clear any previous UFS device information */ - memset(&hba->dev_info, 0, sizeof(hba->dev_info)); - if (!ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG, - QUERY_FLAG_IDN_PWR_ON_WPE, &flag)) - hba->dev_info.f_power_on_wp_en = flag; - - if (!hba->is_init_prefetch) - ufshcd_init_icc_levels(hba); - - /* Add required well known logical units to scsi mid layer */ - if (ufshcd_scsi_add_wlus(hba)) - goto out; - - /* Initialize devfreq after UFS device is detected */ - if (ufshcd_is_clkscaling_supported(hba)) { - memcpy(&hba->clk_scaling.saved_pwr_info.info, - &hba->pwr_info, - sizeof(struct ufs_pa_layer_attr)); - hba->clk_scaling.saved_pwr_info.is_valid = true; - if (!hba->devfreq) { - ret = ufshcd_devfreq_init(hba); - if (ret) - goto out; - } - hba->clk_scaling.is_allowed = true; - } - - ufs_bsg_probe(hba); - - scsi_scan_host(hba->host); - pm_runtime_put_sync(hba->dev); - } - - if (!hba->is_init_prefetch) - hba->is_init_prefetch = true; + /* Enable Auto-Hibernate if configured */ + ufshcd_auto_hibern8_enable(hba); out: - /* - * If we failed to initialize the device or the device is not - * present, turn off the power/clocks etc. 
- */ - if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) { - pm_runtime_put_sync(hba->dev); - ufshcd_exit_clk_scaling(hba); - ufshcd_hba_exit(hba); - } trace_ufshcd_init(dev_name(hba->dev), ret, ktime_to_us(ktime_sub(ktime_get(), start)), @@ -6958,43 +7128,25 @@ out: static void ufshcd_async_scan(void *data, async_cookie_t cookie) { struct ufs_hba *hba = (struct ufs_hba *)data; + int ret; - ufshcd_probe_hba(hba); -} - -static enum blk_eh_timer_return ufshcd_eh_timed_out(struct scsi_cmnd *scmd) -{ - unsigned long flags; - struct Scsi_Host *host; - struct ufs_hba *hba; - int index; - bool found = false; - - if (!scmd || !scmd->device || !scmd->device->host) - return BLK_EH_DONE; - - host = scmd->device->host; - hba = shost_priv(host); - if (!hba) - return BLK_EH_DONE; - - spin_lock_irqsave(host->host_lock, flags); - - for_each_set_bit(index, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[index].cmd == scmd) { - found = true; - break; - } - } - - spin_unlock_irqrestore(host->host_lock, flags); + /* Initialize hba, detect and initialize UFS device */ + ret = ufshcd_probe_hba(hba, true); + if (ret) + goto out; + /* Probe and add UFS logical units */ + ret = ufshcd_add_lus(hba); +out: /* - * Bypass SCSI error handling and reset the block layer timer if this - * SCSI command was not actually dispatched to UFS driver, otherwise - * let SCSI layer handle the error as usual. + * If we failed to initialize the device or the device is not + * present, turn off the power/clocks etc. */ - return found ? BLK_EH_DONE : BLK_EH_RESET_TIMER; + if (ret) { + pm_runtime_put_sync(hba->dev); + ufshcd_exit_clk_scaling(hba); + ufshcd_hba_exit(hba); + } } static const struct attribute_group *ufshcd_driver_groups[] = { @@ -7015,7 +7167,6 @@ static struct scsi_host_template ufshcd_driver_template = { .eh_abort_handler = ufshcd_abort, .eh_device_reset_handler = ufshcd_eh_device_reset_handler, .eh_host_reset_handler = ufshcd_eh_host_reset_handler, - .eh_timed_out = ufshcd_eh_timed_out, .this_id = -1, .sg_tablesize = SG_ALL, .cmd_per_lun = UFSHCD_CMD_PER_LUN, @@ -7025,6 +7176,7 @@ static struct scsi_host_template ufshcd_driver_template = { .track_queue_depth = 1, .sdev_groups = ufshcd_driver_groups, .dma_boundary = PAGE_SIZE - 1, + .rpm_autosuspend_delay = RPM_AUTOSUSPEND_DELAY_MS, }; static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg, @@ -7062,6 +7214,9 @@ static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, struct ufs_vreg *vreg) { + if (!vreg) + return 0; + return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); } @@ -7450,6 +7605,7 @@ static void ufshcd_hba_exit(struct ufs_hba *hba) ufshcd_setup_clocks(hba, false); ufshcd_setup_hba_vreg(hba, false); hba->is_powered = false; + ufs_put_device_desc(hba); } } @@ -7577,8 +7733,7 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba, * turning off the link would also turn off the device. */ else if ((req_link_state == UIC_LINK_OFF_STATE) && - (!check_for_bkops || (check_for_bkops && - !hba->auto_bkops_enabled))) { + (!check_for_bkops || !hba->auto_bkops_enabled)) { /* * Let's make sure that link is in low power mode, we are doing * this currently by putting the link in Hibern8. Otherway to @@ -7784,6 +7939,11 @@ disable_clks: ret = ufshcd_vops_suspend(hba, pm_op); if (ret) goto set_link_active; + /* + * Disable the host irq as there won't be any host controller + * transaction expected till resume.
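The ufshcd_link_state_transition() cleanup in the hunk above drops a redundant term; it is safe because (!a || (a && !b)) is logically identical to (!a || !b). A quick exhaustive check:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	int a, b;

	for (a = 0; a <= 1; a++)
		for (b = 0; b <= 1; b++)
			assert((!a || (a && !b)) == (!a || !b));
	printf("equivalent for all inputs\n");
	return 0;
}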
+ */ + ufshcd_disable_irq(hba); if (!ufshcd_is_link_active(hba)) ufshcd_setup_clocks(hba, false); @@ -7793,11 +7953,7 @@ disable_clks: hba->clk_gating.state = CLKS_OFF; trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state); - /* - * Disable the host irq as host controller as there won't be any - * host controller transaction expected till resume. - */ - ufshcd_disable_irq(hba); + /* Put the host controller in low power mode if possible */ ufshcd_hba_vreg_set_lpm(hba); goto out; @@ -7820,6 +7976,8 @@ enable_gating: ufshcd_release(hba); out: hba->pm_op_in_progress = 0; + if (ret) + ufshcd_update_reg_hist(&hba->ufs_stats.suspend_err, (u32)ret); return ret; } @@ -7848,9 +8006,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) goto out; /* enable the host irq as host controller would be active soon */ - ret = ufshcd_enable_irq(hba); - if (ret) - goto disable_irq_and_vops_clks; + ufshcd_enable_irq(hba); ret = ufshcd_vreg_set_hpm(hba); if (ret) @@ -7901,12 +8057,12 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) if (hba->clk_scaling.is_allowed) ufshcd_resume_clkscaling(hba); - /* Schedule clock gating in case of no access to UFS device yet */ - ufshcd_release(hba); - /* Enable Auto-Hibernate if configured */ ufshcd_auto_hibern8_enable(hba); + /* Schedule clock gating in case of no access to UFS device yet */ + ufshcd_release(hba); + goto out; set_old_link_state: @@ -7922,6 +8078,8 @@ disable_irq_and_vops_clks: ufshcd_setup_clocks(hba, false); out: hba->pm_op_in_progress = 0; + if (ret) + ufshcd_update_reg_hist(&hba->ufs_stats.resume_err, (u32)ret); return ret; } @@ -8092,6 +8250,9 @@ int ufshcd_shutdown(struct ufs_hba *hba) { int ret = 0; + if (!hba->is_powered) + goto out; + if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba)) goto out; @@ -8119,6 +8280,9 @@ void ufshcd_remove(struct ufs_hba *hba) { ufs_bsg_remove(hba); ufs_sysfs_remove_nodes(hba->dev); + blk_cleanup_queue(hba->tmf_queue); + blk_mq_free_tag_set(&hba->tmf_tag_set); + blk_cleanup_queue(hba->cmd_queue); scsi_remove_host(hba->host); /* disable interrupts */ ufshcd_disable_intr(hba, hba->intr_mask); @@ -8197,6 +8361,18 @@ out_error: } EXPORT_SYMBOL(ufshcd_alloc_host); +/* This function exists because blk_mq_alloc_tag_set() requires this. 
*/ +static blk_status_t ufshcd_queue_tmf(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *qd) +{ + WARN_ON_ONCE(true); + return BLK_STS_NOTSUPP; +} + +static const struct blk_mq_ops ufshcd_tmf_ops = { + .queue_rq = ufshcd_queue_tmf, +}; + /** * ufshcd_init - Driver initialization routine * @hba: per-adapter instance @@ -8220,9 +8396,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->mmio_base = mmio_base; hba->irq = irq; - /* Set descriptor lengths to specification defaults */ - ufshcd_def_desc_sizes(hba); - err = ufshcd_hba_init(hba); if (err) goto out_error; @@ -8269,10 +8442,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->max_pwr_info.is_valid = false; - /* Initailize wait queue for task management */ - init_waitqueue_head(&hba->tm_wq); - init_waitqueue_head(&hba->tm_tag_wq); - /* Initialize work queues */ INIT_WORK(&hba->eh_work, ufshcd_err_handler); INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler); @@ -8285,9 +8454,6 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) init_rwsem(&hba->clk_scaling_lock); - /* Initialize device management tag acquire wait queue */ - init_waitqueue_head(&hba->dev_cmd.tag_wq); - ufshcd_init_clk_gating(hba); ufshcd_init_clk_scaling(hba); @@ -8321,13 +8487,37 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) goto exit_gating; } + hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set); + if (IS_ERR(hba->cmd_queue)) { + err = PTR_ERR(hba->cmd_queue); + goto out_remove_scsi_host; + } + + hba->tmf_tag_set = (struct blk_mq_tag_set) { + .nr_hw_queues = 1, + .queue_depth = hba->nutmrs, + .ops = &ufshcd_tmf_ops, + .flags = BLK_MQ_F_NO_SCHED, + }; + err = blk_mq_alloc_tag_set(&hba->tmf_tag_set); + if (err < 0) + goto free_cmd_queue; + hba->tmf_queue = blk_mq_init_queue(&hba->tmf_tag_set); + if (IS_ERR(hba->tmf_queue)) { + err = PTR_ERR(hba->tmf_queue); + goto free_tmf_tag_set; + } + + /* Reset the attached device */ + ufshcd_vops_device_reset(hba); + /* Host controller enable */ err = ufshcd_hba_enable(hba); if (err) { dev_err(hba->dev, "Host controller enable failed\n"); ufshcd_print_host_regs(hba); ufshcd_print_host_state(hba); - goto out_remove_scsi_host; + goto free_tmf_queue; } /* @@ -8364,6 +8554,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) return 0; +free_tmf_queue: + blk_cleanup_queue(hba->tmf_queue); +free_tmf_tag_set: + blk_mq_free_tag_set(&hba->tmf_tag_set); +free_cmd_queue: + blk_cleanup_queue(hba->cmd_queue); out_remove_scsi_host: scsi_remove_host(hba->host); exit_gating: diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 994d73d03207..2ae6c7c8528c 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -212,13 +212,11 @@ struct ufs_query { * @type: device management command type - Query, NOP OUT * @lock: lock to allow one command at a time * @complete: internal commands completion - * @tag_wq: wait queue until free command slot is available */ struct ufs_dev_cmd { enum dev_cmd_type type; struct mutex lock; struct completion *complete; - wait_queue_head_t tag_wq; struct ufs_query query; }; @@ -298,6 +296,7 @@ struct ufs_pwr_mode_info { * @resume: called during host controller PM callback * @dbg_register_dump: used to dump controller debug information * @phy_initialization: used to initialize phys + * @device_reset: called to issue a reset pulse on the UFS device */ struct ufs_hba_variant_ops { const char 
*name; @@ -321,11 +320,12 @@ struct ufs_hba_variant_ops { void (*setup_task_mgmt)(struct ufs_hba *, int, u8); void (*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme, enum ufs_notify_change_status); - int (*apply_dev_quirks)(struct ufs_hba *); + int (*apply_dev_quirks)(struct ufs_hba *hba); int (*suspend)(struct ufs_hba *, enum ufs_pm_op); int (*resume)(struct ufs_hba *, enum ufs_pm_op); void (*dbg_register_dump)(struct ufs_hba *hba); int (*phy_initialization)(struct ufs_hba *); + void (*device_reset)(struct ufs_hba *hba); }; /* clock gating state */ @@ -412,17 +412,17 @@ struct ufs_init_prefetch { u32 icc_level; }; -#define UIC_ERR_REG_HIST_LENGTH 8 +#define UFS_ERR_REG_HIST_LENGTH 8 /** - * struct ufs_uic_err_reg_hist - keeps history of uic errors + * struct ufs_err_reg_hist - keeps history of errors * @pos: index to indicate cyclic buffer position * @reg: cyclic buffer for registers value * @tstamp: cyclic buffer for time stamp */ -struct ufs_uic_err_reg_hist { +struct ufs_err_reg_hist { int pos; - u32 reg[UIC_ERR_REG_HIST_LENGTH]; - ktime_t tstamp[UIC_ERR_REG_HIST_LENGTH]; + u32 reg[UFS_ERR_REG_HIST_LENGTH]; + ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH]; }; /** @@ -436,15 +436,37 @@ struct ufs_uic_err_reg_hist { * @nl_err: tracks nl-uic errors * @tl_err: tracks tl-uic errors * @dme_err: tracks dme errors + * @auto_hibern8_err: tracks auto-hibernate errors + * @fatal_err: tracks fatal errors + * @linkup_err: tracks link-startup errors + * @resume_err: tracks resume errors + * @suspend_err: tracks suspend errors + * @dev_reset: tracks device reset events + * @host_reset: tracks host reset events + * @tsk_abort: tracks task abort events */ struct ufs_stats { u32 hibern8_exit_cnt; ktime_t last_hibern8_exit_tstamp; - struct ufs_uic_err_reg_hist pa_err; - struct ufs_uic_err_reg_hist dl_err; - struct ufs_uic_err_reg_hist nl_err; - struct ufs_uic_err_reg_hist tl_err; - struct ufs_uic_err_reg_hist dme_err; + + /* uic specific errors */ + struct ufs_err_reg_hist pa_err; + struct ufs_err_reg_hist dl_err; + struct ufs_err_reg_hist nl_err; + struct ufs_err_reg_hist tl_err; + struct ufs_err_reg_hist dme_err; + + /* fatal errors */ + struct ufs_err_reg_hist auto_hibern8_err; + struct ufs_err_reg_hist fatal_err; + struct ufs_err_reg_hist link_startup_err; + struct ufs_err_reg_hist resume_err; + struct ufs_err_reg_hist suspend_err; + + /* abnormal events */ + struct ufs_err_reg_hist dev_reset; + struct ufs_err_reg_hist host_reset; + struct ufs_err_reg_hist task_abort; }; /** @@ -459,7 +481,7 @@ struct ufs_stats { * @host: Scsi_Host instance of the driver * @dev: device handle * @lrb: local reference block - * @lrb_in_use: lrb in use + * @cmd_queue: Used to allocate command tags from hba->host->tag_set. * @outstanding_tasks: Bits representing outstanding task requests * @outstanding_reqs: Bits representing outstanding transfer requests * @capabilities: UFS Controller Capabilities @@ -471,17 +493,14 @@ struct ufs_stats { * @irq: Irq number of the controller * @active_uic_cmd: handle of active UIC command * @uic_cmd_mutex: mutex for uic command - * @tm_wq: wait queue for task management - * @tm_tag_wq: wait queue for free task management slots - * @tm_slots_in_use: bit map of task management request slots in use + * @tmf_tag_set: TMF tag set. + * @tmf_queue: Used to allocate TMF tags. 
* @pwr_done: completion for power mode change - * @tm_condition: condition variable for task management * @ufshcd_state: UFSHCD states * @eh_flags: Error handling flags * @intr_mask: Interrupt Mask Bits * @ee_ctrl_mask: Exception event control mask * @is_powered: flag to check if HBA is powered - * @is_init_prefetch: flag to check if data was pre-fetched in initialization * @init_prefetch_data: data pre-fetched during initialization * @eh_work: Worker to handle UFS errors that require s/w attention * @eeh_work: Worker to handle exception events @@ -489,6 +508,7 @@ struct ufs_stats { * @uic_error: UFS interconnect layer error status * @saved_err: sticky error mask * @saved_uic_err: sticky UIC error mask + * @silence_err_logs: flag to silence error logs * @dev_cmd: ufs device management command information * @last_dme_cmd_tstamp: time stamp of the last completed DME command * @auto_bkops_enabled: to track whether bkops is enabled in device @@ -517,6 +537,7 @@ struct ufs_hba { struct Scsi_Host *host; struct device *dev; + struct request_queue *cmd_queue; /* * This field is to keep a reference to "scsi_device" corresponding to * "UFS device" W-LU. @@ -537,7 +558,6 @@ struct ufs_hba { u32 ahit; struct ufshcd_lrb *lrb; - unsigned long lrb_in_use; unsigned long outstanding_tasks; unsigned long outstanding_reqs; @@ -619,10 +639,8 @@ struct ufs_hba { /* Device deviations from standard UFS device spec. */ unsigned int dev_quirks; - wait_queue_head_t tm_wq; - wait_queue_head_t tm_tag_wq; - unsigned long tm_condition; - unsigned long tm_slots_in_use; + struct blk_mq_tag_set tmf_tag_set; + struct request_queue *tmf_queue; struct uic_command *active_uic_cmd; struct mutex uic_cmd_mutex; @@ -633,7 +651,6 @@ struct ufs_hba { u32 intr_mask; u16 ee_ctrl_mask; bool is_powered; - bool is_init_prefetch; struct ufs_init_prefetch init_prefetch_data; /* Work Queues */ @@ -646,6 +663,7 @@ struct ufs_hba { u32 saved_err; u32 saved_uic_err; struct ufs_stats ufs_stats; + bool silence_err_logs; /* Device management request data */ struct ufs_dev_cmd dev_cmd; @@ -692,6 +710,12 @@ struct ufs_hba { * the performance of ongoing read/write operations. */ #define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5) + /* + * This capability allows host controller driver to automatically + * enable runtime power management by itself instead of waiting + * for userspace to control the power management. 
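As the new @tmf_tag_set/@tmf_queue fields document, the series retires the hand-rolled tm_wq/tm_slots_in_use machinery and lets a reserved blk-mq tag set hand out task-management slots (the queue never carries real I/O, hence the stub .queue_rq seen earlier). A minimal bitmap allocator showing what those tags replace; this is only a sketch, and blk-mq additionally sleeps for the caller when all slots are busy:

#include <stdio.h>

#define NUTMRS 8			/* task-management slots, like hba->nutmrs */

static unsigned long tm_slots;		/* bit i set => slot i in use */

static int get_tm_slot(void)		/* like req->tag from blk_get_request() */
{
	int i;

	for (i = 0; i < NUTMRS; i++) {
		if (!(tm_slots & (1ul << i))) {
			tm_slots |= 1ul << i;
			return i;
		}
	}
	return -1;			/* all busy; blk-mq would block instead */
}

static void put_tm_slot(int tag)	/* like blk_put_request() */
{
	tm_slots &= ~(1ul << tag);
}

int main(void)
{
	int a = get_tm_slot(), b = get_tm_slot();

	printf("got tags %d and %d\n", a, b);
	put_tm_slot(a);
	put_tm_slot(b);
	return 0;
}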
+ */ +#define UFSHCD_CAP_RPM_AUTOSUSPEND (1 << 6) struct devfreq *devfreq; struct ufs_clk_scaling clk_scaling; @@ -725,6 +749,10 @@ static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba) { return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND; } +static inline bool ufshcd_is_rpm_autosuspend_allowed(struct ufs_hba *hba) +{ + return hba->caps & UFSHCD_CAP_RPM_AUTOSUSPEND; +} static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba) { @@ -769,12 +797,17 @@ static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg) int ufshcd_alloc_host(struct device *, struct ufs_hba **); void ufshcd_dealloc_host(struct ufs_hba *); +int ufshcd_hba_enable(struct ufs_hba *hba); int ufshcd_init(struct ufs_hba * , void __iomem * , unsigned int); +int ufshcd_make_hba_operational(struct ufs_hba *hba); void ufshcd_remove(struct ufs_hba *); +int ufshcd_uic_hibern8_exit(struct ufs_hba *hba); int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask, u32 val, unsigned long interval_us, unsigned long timeout_ms, bool can_sleep); void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk); +void ufshcd_update_reg_hist(struct ufs_err_reg_hist *reg_hist, + u32 reg); static inline void check_upiu_size(void) { @@ -891,8 +924,14 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, enum attr_idn idn, u8 index, u8 selector, u32 *attr_val); int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, bool *flag_res); -int ufshcd_read_string_desc(struct ufs_hba *hba, int desc_index, - u8 *buf, u32 size, bool ascii); + +void ufshcd_auto_hibern8_enable(struct ufs_hba *hba); +void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); + +#define SD_ASCII_STD true +#define SD_RAW false +int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, + u8 **buf, bool ascii); int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); @@ -1045,6 +1084,14 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba) hba->vops->dbg_register_dump(hba); } +static inline void ufshcd_vops_device_reset(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->device_reset) { + hba->vops->device_reset(hba); + ufshcd_update_reg_hist(&hba->ufs_stats.dev_reset, 0); + } +} + extern struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /* diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index dbb75cd28dc8..c2961d37cc1c 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h @@ -195,7 +195,7 @@ enum { /* UECDL - Host UIC Error Code Data Link Layer 3Ch */ #define UIC_DATA_LINK_LAYER_ERROR 0x80000000 -#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0x7FFF +#define UIC_DATA_LINK_LAYER_ERROR_CODE_MASK 0xFFFF #define UIC_DATA_LINK_LAYER_ERROR_TCX_REP_TIMER_EXP 0x2 #define UIC_DATA_LINK_LAYER_ERROR_AFCX_REQ_TIMER_EXP 0x4 #define UIC_DATA_LINK_LAYER_ERROR_FCX_PRO_TIMER_EXP 0x8 diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h index f539f873f94d..3dc4d8b76509 100644 --- a/drivers/scsi/ufs/unipro.h +++ b/drivers/scsi/ufs/unipro.h @@ -161,6 +161,17 @@ /* PHY Adapter Protocol Constants */ #define PA_MAXDATALANES 4 +#define DL_FC0ProtectionTimeOutVal_Default 8191 +#define DL_TC0ReplayTimeOutVal_Default 65535 +#define DL_AFC0ReqTimeOutVal_Default 32767 +#define DL_FC1ProtectionTimeOutVal_Default 8191 +#define DL_TC1ReplayTimeOutVal_Default 65535 +#define DL_AFC1ReqTimeOutVal_Default 32767 + +#define DME_LocalFC0ProtectionTimeOutVal 0xD041 +#define 
DME_LocalTC0ReplayTimeOutVal 0xD042 +#define DME_LocalAFC0ReqTimeOutVal 0xD043 + /* PA power modes */ enum { FAST_MODE = 1, diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 297e1076e571..bfec84aacd90 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -30,6 +30,8 @@ #include <linux/seqlock.h> #include <linux/blk-mq-virtio.h> +#include "sd.h" + #define VIRTIO_SCSI_MEMPOOL_SZ 64 #define VIRTIO_SCSI_EVENT_LEN 8 #define VIRTIO_SCSI_VQ_BASE 2 @@ -324,6 +326,36 @@ static void virtscsi_handle_param_change(struct virtio_scsi *vscsi, scsi_device_put(sdev); } +static void virtscsi_rescan_hotunplug(struct virtio_scsi *vscsi) +{ + struct scsi_device *sdev; + struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev); + unsigned char scsi_cmd[MAX_COMMAND_SIZE]; + int result, inquiry_len, inq_result_len = 256; + char *inq_result = kmalloc(inq_result_len, GFP_KERNEL); + + shost_for_each_device(sdev, shost) { + inquiry_len = sdev->inquiry_len ? sdev->inquiry_len : 36; + + memset(scsi_cmd, 0, sizeof(scsi_cmd)); + scsi_cmd[0] = INQUIRY; + scsi_cmd[4] = (unsigned char) inquiry_len; + + memset(inq_result, 0, inq_result_len); + + result = scsi_execute_req(sdev, scsi_cmd, DMA_FROM_DEVICE, + inq_result, inquiry_len, NULL, + SD_TIMEOUT, SD_MAX_RETRIES, NULL); + + if (result == 0 && inq_result[0] >> 5) { + /* PQ indicates the LUN is not attached */ + scsi_remove_device(sdev); + } + } + + kfree(inq_result); +} + static void virtscsi_handle_event(struct work_struct *work) { struct virtio_scsi_event_node *event_node = @@ -335,6 +367,7 @@ static void virtscsi_handle_event(struct work_struct *work) cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED)) { event->event &= ~cpu_to_virtio32(vscsi->vdev, VIRTIO_SCSI_T_EVENTS_MISSED); + virtscsi_rescan_hotunplug(vscsi); scsi_scan_host(virtio_scsi_host(vscsi->vdev)); } @@ -369,14 +402,7 @@ static void virtscsi_event_done(struct virtqueue *vq) virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event); }; -/** - * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue - * @vq : the struct virtqueue we're talking about - * @cmd : command structure - * @req_size : size of the request buffer - * @resp_size : size of the response buffer - */ -static int virtscsi_add_cmd(struct virtqueue *vq, +static int __virtscsi_add_cmd(struct virtqueue *vq, struct virtio_scsi_cmd *cmd, size_t req_size, size_t resp_size) { @@ -421,17 +447,39 @@ static int virtscsi_add_cmd(struct virtqueue *vq, return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, GFP_ATOMIC); } -static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq, +static void virtscsi_kick_vq(struct virtio_scsi_vq *vq) +{ + bool needs_kick; + unsigned long flags; + + spin_lock_irqsave(&vq->vq_lock, flags); + needs_kick = virtqueue_kick_prepare(vq->vq); + spin_unlock_irqrestore(&vq->vq_lock, flags); + + if (needs_kick) + virtqueue_notify(vq->vq); +} + +/** + * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue, optionally kick it + * @vq : the struct virtqueue we're talking about + * @cmd : command structure + * @req_size : size of the request buffer + * @resp_size : size of the response buffer + * @kick : whether to kick the virtqueue immediately + */ +static int virtscsi_add_cmd(struct virtio_scsi_vq *vq, struct virtio_scsi_cmd *cmd, - size_t req_size, size_t resp_size) + size_t req_size, size_t resp_size, + bool kick) { unsigned long flags; int err; bool needs_kick = false; spin_lock_irqsave(&vq->vq_lock, flags); - err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); - 
if (!err) + err = __virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size); + if (!err && kick) needs_kick = virtqueue_kick_prepare(vq->vq); spin_unlock_irqrestore(&vq->vq_lock, flags); @@ -496,6 +544,7 @@ static int virtscsi_queuecommand(struct Scsi_Host *shost, struct virtio_scsi *vscsi = shost_priv(shost); struct virtio_scsi_vq *req_vq = virtscsi_pick_vq_mq(vscsi, sc); struct virtio_scsi_cmd *cmd = scsi_cmd_priv(sc); + bool kick; unsigned long flags; int req_size; int ret; @@ -525,7 +574,8 @@ static int virtscsi_queuecommand(struct Scsi_Host *shost, req_size = sizeof(cmd->req.cmd); } - ret = virtscsi_kick_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd)); + kick = (sc->flags & SCMD_LAST) != 0; + ret = virtscsi_add_cmd(req_vq, cmd, req_size, sizeof(cmd->resp.cmd), kick); if (ret == -EIO) { cmd->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; spin_lock_irqsave(&req_vq->vq_lock, flags); @@ -543,8 +593,8 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd) int ret = FAILED; cmd->comp = &comp; - if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd, - sizeof cmd->req.tmf, sizeof cmd->resp.tmf) < 0) + if (virtscsi_add_cmd(&vscsi->ctrl_vq, cmd, + sizeof cmd->req.tmf, sizeof cmd->resp.tmf, true) < 0) goto out; wait_for_completion(&comp); @@ -658,6 +708,13 @@ static int virtscsi_map_queues(struct Scsi_Host *shost) return blk_mq_virtio_map_queues(qmap, vscsi->vdev, 2); } +static void virtscsi_commit_rqs(struct Scsi_Host *shost, u16 hwq) +{ + struct virtio_scsi *vscsi = shost_priv(shost); + + virtscsi_kick_vq(&vscsi->req_vqs[hwq]); +} + /* * The host guarantees to respond to each command, although I/O * latencies might be higher than on bare metal. Reset the timer @@ -675,6 +732,7 @@ static struct scsi_host_template virtscsi_host_template = { .this_id = -1, .cmd_size = sizeof(struct virtio_scsi_cmd), .queuecommand = virtscsi_queuecommand, + .commit_rqs = virtscsi_commit_rqs, .change_queue_depth = virtscsi_change_queue_depth, .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c index 70008816c91f..c3f010df641e 100644 --- a/drivers/scsi/vmw_pvscsi.c +++ b/drivers/scsi/vmw_pvscsi.c @@ -365,7 +365,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, int segs = scsi_dma_map(cmd); if (segs == -ENOMEM) { - scmd_printk(KERN_ERR, cmd, + scmd_printk(KERN_DEBUG, cmd, "vmw_pvscsi: Failed to map cmd sglist for DMA.\n"); return -ENOMEM; } else if (segs > 1) { @@ -392,7 +392,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen, cmd->sc_data_direction); if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) { - scmd_printk(KERN_ERR, cmd, + scmd_printk(KERN_DEBUG, cmd, "vmw_pvscsi: Failed to map direct data buffer for DMA.\n"); return -ENOMEM; } @@ -402,6 +402,17 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter, return 0; } +/* + * The device incorrectly doesn't clear the first byte of the sense + * buffer in some cases. We have to do it ourselves. + * Otherwise we run into trouble when SWIOTLB is forced.
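The virtio_scsi changes above split "add to virtqueue" from "kick the device": queuecommand only notifies for commands flagged SCMD_LAST, and the new .commit_rqs callback flushes whatever the block layer batched without a kick. A self-contained model of that deferred-notify flow (illustrative only, no virtio involved):

#include <stdio.h>
#include <stdbool.h>

static int pending;	/* commands queued since the last device notify */

static void add_cmd(int tag, bool kick)
{
	pending++;
	printf("queued cmd %d\n", tag);
	if (kick) {
		printf("notify device (%d cmd(s))\n", pending);
		pending = 0;
	}
}

static void commit_rqs(void)
{
	if (pending) {	/* flush the tail of a batch */
		printf("notify device (%d cmd(s))\n", pending);
		pending = 0;
	}
}

int main(void)
{
	add_cmd(0, false);
	add_cmd(1, false);
	add_cmd(2, true);	/* SCMD_LAST set: kick right away */
	add_cmd(3, false);
	commit_rqs();		/* block layer flushes the leftover */
	return 0;
}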
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 70008816c91f..c3f010df641e 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -365,7 +365,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 		int segs = scsi_dma_map(cmd);
 
 		if (segs == -ENOMEM) {
-			scmd_printk(KERN_ERR, cmd,
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map cmd sglist for DMA.\n");
 			return -ENOMEM;
 		} else if (segs > 1) {
@@ -392,7 +392,7 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 		ctx->dataPA = dma_map_single(&adapter->dev->dev, sg, bufflen,
 					     cmd->sc_data_direction);
 		if (dma_mapping_error(&adapter->dev->dev, ctx->dataPA)) {
-			scmd_printk(KERN_ERR, cmd,
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map direct data buffer for DMA.\n");
 			return -ENOMEM;
 		}
@@ -402,6 +402,17 @@ static int pvscsi_map_buffers(struct pvscsi_adapter *adapter,
 	return 0;
 }
 
+/*
+ * The device incorrectly doesn't clear the first byte of the sense
+ * buffer in some cases. We have to do it ourselves.
+ * Otherwise we run into trouble when SWIOTLB is forced.
+ */
+static void pvscsi_patch_sense(struct scsi_cmnd *cmd)
+{
+	if (cmd->sense_buffer)
+		cmd->sense_buffer[0] = 0;
+}
+
 static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
 				 struct pvscsi_ctx *ctx)
 {
@@ -544,6 +555,8 @@ static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
 	cmd = ctx->cmd;
 	abort_cmp = ctx->abort_cmp;
 	pvscsi_unmap_buffers(adapter, ctx);
+	if (sdstat != SAM_STAT_CHECK_CONDITION)
+		pvscsi_patch_sense(cmd);
 	pvscsi_release_context(adapter, ctx);
 	if (abort_cmp) {
 		/*
@@ -712,7 +725,7 @@ static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
 					      cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
 					      DMA_FROM_DEVICE);
 		if (dma_mapping_error(&adapter->dev->dev, ctx->sensePA)) {
-			scmd_printk(KERN_ERR, cmd,
+			scmd_printk(KERN_DEBUG, cmd,
 				    "vmw_pvscsi: Failed to map sense buffer for DMA.\n");
 			ctx->sensePA = 0;
 			return -ENOMEM;
@@ -873,6 +886,7 @@ static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
 			scmd_printk(KERN_ERR, cmd,
 				    "Forced reset on cmd %p\n", cmd);
 			pvscsi_unmap_buffers(adapter, ctx);
+			pvscsi_patch_sense(cmd);
 			pvscsi_release_context(adapter, ctx);
 			cmd->result = (DID_RESET << 16);
 			cmd->scsi_done(cmd);
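
pvscsi_patch_sense() matters because byte 0 of the sense buffer carries the sense response code, and the SCSI midlayer only treats sense data as valid when that byte falls in the 0x70..0x73 family (compare scsi_sense_valid()). With SWIOTLB forced, the sense buffer is bounced, and on unmap whatever stale bytes sit in the bounce slot are copied back even when the device produced no sense; zeroing byte 0 on every completion that is not a CHECK CONDITION makes such residue fail the validity test. An illustrative check in the same spirit (not the kernel's exact code):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Sense data is only meaningful if byte 0 holds a response code of
 * 0x70, 0x71, 0x72 or 0x73; a zeroed byte 0 can never pass this.
 */
static bool sense_looks_valid(const uint8_t *sense, size_t len)
{
	return len >= 1 && (sense[0] & 0x70) == 0x70;
}
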
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index fb7b289fa09f..f81046f0e68a 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1854,6 +1854,7 @@ round_4(unsigned int x)
 	  case 1: --x;
 		  break;
 	  case 2: ++x;
+		  /* fall through */
 	  case 3: ++x;
 	}
 	return x;
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 77bce208210e..7eac76cccc4c 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -89,7 +89,7 @@ zalon_probe(struct parisc_device *dev)
 	struct gsc_irq gsc_irq;
 	u32 zalon_vers;
 	int error = -ENODEV;
-	void __iomem *zalon = ioremap_nocache(dev->hpa.start, 4096);
+	void __iomem *zalon = ioremap(dev->hpa.start, 4096);
 	void __iomem *io_port = zalon + GSC_SCSI_ZALON_OFFSET;
 	static int unit = 0;
 	struct Scsi_Host *host;
diff --git a/drivers/scsi/zorro_esp.c b/drivers/scsi/zorro_esp.c
index ca8e3abeb2c7..bdd82e497d5f 100644
--- a/drivers/scsi/zorro_esp.c
+++ b/drivers/scsi/zorro_esp.c
@@ -218,7 +218,14 @@ static int fastlane_esp_irq_pending(struct esp *esp)
 static u32 zorro_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
 					u32 dma_len)
 {
-	return dma_len > 0xFFFF ? 0xFFFF : dma_len;
+	return dma_len > (1U << 16) ? (1U << 16) : dma_len;
+}
+
+static u32 fastlane_esp_dma_length_limit(struct esp *esp, u32 dma_addr,
+					u32 dma_len)
+{
+	/* The old driver used 0xfffc as limit, so do that here too */
+	return dma_len > 0xfffc ? 0xfffc : dma_len;
 }
 
 static void zorro_esp_reset_dma(struct esp *esp)
@@ -604,7 +611,7 @@ static const struct esp_driver_ops fastlane_esp_ops = {
 	.esp_write8 = zorro_esp_write8,
 	.esp_read8 = zorro_esp_read8,
 	.irq_pending = fastlane_esp_irq_pending,
-	.dma_length_limit = zorro_esp_dma_length_limit,
+	.dma_length_limit = fastlane_esp_dma_length_limit,
 	.reset_dma = zorro_esp_reset_dma,
 	.dma_drain = zorro_esp_dma_drain,
 	.dma_invalidate = fastlane_esp_dma_invalidate,
@@ -794,7 +801,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
 	/* additional setup required for Fastlane */
 	if (zep->zorro3 && ent->driver_data == ZORRO_BLZ1230II) {
 		/* map full address space up to ESP base for DMA */
-		zep->board_base = ioremap_nocache(board,
+		zep->board_base = ioremap(board,
 						FASTLANE_ESP_ADDR-1);
 		if (!zep->board_base) {
 			pr_err("Cannot allocate board address space\n");
@@ -809,7 +816,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
 	esp->ops = zdd->esp_ops;
 
 	if (ioaddr > 0xffffff)
-		esp->regs = ioremap_nocache(ioaddr, 0x20);
+		esp->regs = ioremap(ioaddr, 0x20);
 	else
 		/* ZorroII address space remapped nocache by early startup */
 		esp->regs = ZTWO_VADDR(ioaddr);
@@ -835,7 +842,7 @@ static int zorro_esp_probe(struct zorro_dev *z,
 	 * Only Fastlane Z3 for now - add switch for correct struct
 	 * dma_registers size if adding any more
 	 */
-		esp->dma_regs = ioremap_nocache(dmaaddr,
+		esp->dma_regs = ioremap(dmaaddr,
 				sizeof(struct fastlane_dma_registers));
 	} else
 		/* ZorroII address space remapped nocache by early startup */
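
The zorro_esp.c hunk raises the generic DMA limit from 0xFFFF to 0x10000: assuming the usual 53C9x convention that a programmed transfer count of zero is interpreted as a full 64 KiB, clamping at 0xFFFF needlessly split maximal transfers, while the Fastlane board keeps the conservative 0xfffc cap its old driver used. A small standalone illustration of both points (illustrative names, not code from the driver):

#include <stdint.h>

/* Clamp a transfer to the per-board limit. */
static uint32_t esp_clamp_len(uint32_t len, uint32_t limit)
{
	return len > limit ? limit : len;
}

/*
 * Writing only the low 16 bits of the count encodes 0x10000 as 0,
 * which the chip treats as 64 KiB (assumed 53C9x behavior), so the
 * full 1U << 16 limit is representable in the counter register.
 */
static uint16_t esp_count_reg(uint32_t len)
{
	return (uint16_t)len;
}
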