author		Linus Torvalds <torvalds@linux-foundation.org>	2011-10-28 16:44:18 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-28 16:44:18 -0700
commit		ec7ae517537ae5c7b0b2cd7f562dfa3e7a05b954 (patch)
tree		e6b0c64a51a7c0aa0efd09d4f7a80872e3b1657a /drivers
parent		97d2eb13a019ec09cc1a7ea2d3705c0b117b3c0d (diff)
parent		590134fa78fbdbe5fea78c7ae0b2c3364bc9572f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (204 commits)
[SCSI] qla4xxx: export address/port of connection (fix udev disk names)
[SCSI] ipr: Fix BUG on adapter dump timeout
[SCSI] megaraid_sas: Fix instance access in megasas_reset_timer
[SCSI] hpsa: change confusing message to be more clear
[SCSI] iscsi class: fix vlan configuration
[SCSI] qla4xxx: fix data alignment and use nl helpers
[SCSI] iscsi class: fix link local mispelling
[SCSI] iscsi class: Replace iscsi_get_next_target_id with IDA
[SCSI] aacraid: use lower snprintf() limit
[SCSI] lpfc 8.3.27: Change driver version to 8.3.27
[SCSI] lpfc 8.3.27: T10 additions for SLI4
[SCSI] lpfc 8.3.27: Fix queue allocation failure recovery
[SCSI] lpfc 8.3.27: Change algorithm for getting physical port name
[SCSI] lpfc 8.3.27: Changed worst case mailbox timeout
[SCSI] lpfc 8.3.27: Miscellanous logic and interface fixes
[SCSI] megaraid_sas: Changelog and version update
[SCSI] megaraid_sas: Add driver workaround for PERC5/1068 kdump kernel panic
[SCSI] megaraid_sas: Add multiple MSI-X vector/multiple reply queue support
[SCSI] megaraid_sas: Add support for MegaRAID 9360/9380 12GB/s controllers
[SCSI] megaraid_sas: Clear FUSION_IN_RESET before enabling interrupts
...
Diffstat (limited to 'drivers')
144 files changed, 13248 insertions, 3194 deletions
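Several of the iSCSI patches below (iscsi_iser.c, be2iscsi) drop the old param_mask/host_param_mask bitmasks in favor of an attr_is_visible() callback, which the iSCSI transport class queries per attribute to decide whether to create the sysfs file. A minimal, self-contained user-space sketch of that pattern follows; the enum values, names, and helpers are illustrative stand-ins, not the kernel's API.

/* Illustrative mock of the attr_is_visible pattern; param IDs and
 * helper names here are hypothetical, not the kernel's. */
#include <stdio.h>
#include <sys/stat.h>

#ifndef S_IRUGO
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#endif

enum { PARAM_TARGET_NAME, PARAM_PASSWORD_IN, PARAM_VENDOR_DEBUG, PARAM_MAX };

/* Driver-side callback: return the sysfs mode for a param, 0 hides it. */
static mode_t demo_attr_is_visible(int param)
{
	switch (param) {
	case PARAM_TARGET_NAME:
	case PARAM_PASSWORD_IN:
		return S_IRUGO;
	default:
		return 0;	/* not supported by this driver */
	}
}

int main(void)
{
	static const char *names[PARAM_MAX] = {
		"target_name", "password_in", "vendor_debug",
	};

	/* Transport-class side: create only the attributes the driver
	 * reports as visible, instead of testing a param_mask bit. */
	for (int p = 0; p < PARAM_MAX; p++) {
		mode_t mode = demo_attr_is_visible(p);
		if (mode)
			printf("create sysfs attr %-12s mode %o\n",
			       names[p], (unsigned int)mode);
		else
			printf("skip   sysfs attr %s\n", names[p]);
	}
	return 0;
}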
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 32fc41c1da30..c04ad68cb602 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6713,6 +6713,7 @@ EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
+EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
 EXPORT_SYMBOL_GPL(sata_scr_valid);
 EXPORT_SYMBOL_GPL(sata_scr_read);
 EXPORT_SYMBOL_GPL(sata_scr_write);
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 46d087f08607..19ba77032ac2 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1215,25 +1215,15 @@ void ata_scsi_slave_destroy(struct scsi_device *sdev)
 }
 
 /**
- * ata_scsi_change_queue_depth - SCSI callback for queue depth config
- * @sdev: SCSI device to configure queue depth for
- * @queue_depth: new queue depth
- * @reason: calling context
- *
- * This is libata standard hostt->change_queue_depth callback.
- * SCSI will call into this callback when user tries to set queue
- * depth via sysfs.
+ * __ata_change_queue_depth - helper for ata_scsi_change_queue_depth
- *
- * LOCKING:
- * SCSI layer (we don't care)
+ * libsas and libata have different approaches for associating a sdev to
+ * its ata_port.
- *
- * RETURNS:
- * Newly configured queue depth.
+ */
-int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
-			int reason)
+int __ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
+			int queue_depth, int reason)
 {
-	struct ata_port *ap = ata_shost_to_port(sdev->host);
 	struct ata_device *dev;
 	unsigned long flags;
@@ -1269,6 +1259,30 @@ int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
 }
 
 /**
+ * ata_scsi_change_queue_depth - SCSI callback for queue depth config
+ * @sdev: SCSI device to configure queue depth for
+ * @queue_depth: new queue depth
+ * @reason: calling context
+ *
+ * This is libata standard hostt->change_queue_depth callback.
+ * SCSI will call into this callback when user tries to set queue
+ * depth via sysfs.
+ *
+ * LOCKING:
+ * SCSI layer (we don't care)
+ *
+ * RETURNS:
+ * Newly configured queue depth.
+ */
+int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth,
+			int reason)
+{
+	struct ata_port *ap = ata_shost_to_port(sdev->host);
+
+	return __ata_change_queue_depth(ap, sdev, queue_depth, reason);
+}
+
+/**
  * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
  * @qc: Storage for translated ATA taskfile
  *
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9c61b9c2c597..84e8c293a715 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -632,6 +632,59 @@ iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep)
 	iser_conn_terminate(ib_conn);
 }
 
+static mode_t iser_attr_is_visible(int param_type, int param)
+{
+	switch (param_type) {
+	case ISCSI_HOST_PARAM:
+		switch (param) {
+		case ISCSI_HOST_PARAM_NETDEV_NAME:
+		case ISCSI_HOST_PARAM_HWADDRESS:
+		case ISCSI_HOST_PARAM_INITIATOR_NAME:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	case ISCSI_PARAM:
+		switch (param) {
+		case ISCSI_PARAM_MAX_RECV_DLENGTH:
+		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
+		case ISCSI_PARAM_HDRDGST_EN:
+		case ISCSI_PARAM_DATADGST_EN:
+		case ISCSI_PARAM_CONN_ADDRESS:
+		case ISCSI_PARAM_CONN_PORT:
+		case ISCSI_PARAM_EXP_STATSN:
+		case ISCSI_PARAM_PERSISTENT_ADDRESS:
+		case ISCSI_PARAM_PERSISTENT_PORT:
+		case ISCSI_PARAM_PING_TMO:
+		case ISCSI_PARAM_RECV_TMO:
+		case ISCSI_PARAM_INITIAL_R2T_EN:
+		case ISCSI_PARAM_MAX_R2T:
+		case ISCSI_PARAM_IMM_DATA_EN:
+		case ISCSI_PARAM_FIRST_BURST:
+		case ISCSI_PARAM_MAX_BURST:
+		case ISCSI_PARAM_PDU_INORDER_EN:
+		case ISCSI_PARAM_DATASEQ_INORDER_EN:
+		case ISCSI_PARAM_TARGET_NAME:
+		case ISCSI_PARAM_TPGT:
+		case ISCSI_PARAM_USERNAME:
+		case ISCSI_PARAM_PASSWORD:
+		case ISCSI_PARAM_USERNAME_IN:
+		case ISCSI_PARAM_PASSWORD_IN:
+		case ISCSI_PARAM_FAST_ABORT:
+		case ISCSI_PARAM_ABORT_TMO:
+		case ISCSI_PARAM_LU_RESET_TMO:
+		case ISCSI_PARAM_TGT_RESET_TMO:
+		case ISCSI_PARAM_IFACE_NAME:
+		case ISCSI_PARAM_INITIATOR_NAME:
+			return S_IRUGO;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
 static struct scsi_host_template iscsi_iser_sht = {
 	.module			= THIS_MODULE,
 	.name			= "iSCSI Initiator over iSER, v." DRV_VER,
@@ -653,32 +706,6 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.owner			= THIS_MODULE,
 	.name			= "iser",
 	.caps			= CAP_RECOVERY_L0 | CAP_MULTI_R2T,
-	.param_mask		= ISCSI_MAX_RECV_DLENGTH |
-				  ISCSI_MAX_XMIT_DLENGTH |
-				  ISCSI_HDRDGST_EN |
-				  ISCSI_DATADGST_EN |
-				  ISCSI_INITIAL_R2T_EN |
-				  ISCSI_MAX_R2T |
-				  ISCSI_IMM_DATA_EN |
-				  ISCSI_FIRST_BURST |
-				  ISCSI_MAX_BURST |
-				  ISCSI_PDU_INORDER_EN |
-				  ISCSI_DATASEQ_INORDER_EN |
-				  ISCSI_CONN_PORT |
-				  ISCSI_CONN_ADDRESS |
-				  ISCSI_EXP_STATSN |
-				  ISCSI_PERSISTENT_PORT |
-				  ISCSI_PERSISTENT_ADDRESS |
-				  ISCSI_TARGET_NAME | ISCSI_TPGT |
-				  ISCSI_USERNAME | ISCSI_PASSWORD |
-				  ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
-				  ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
-				  ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO |
-				  ISCSI_PING_TMO | ISCSI_RECV_TMO |
-				  ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
-	.host_param_mask	= ISCSI_HOST_HWADDRESS |
-				  ISCSI_HOST_NETDEV_NAME |
-				  ISCSI_HOST_INITIATOR_NAME,
 	/* session management */
 	.create_session		= iscsi_iser_session_create,
 	.destroy_session	= iscsi_iser_session_destroy,
@@ -686,6 +713,7 @@ static struct iscsi_transport iscsi_iser_transport = {
 	.create_conn		= iscsi_iser_conn_create,
 	.bind_conn		= iscsi_iser_conn_bind,
 	.destroy_conn		= iscsi_iser_conn_destroy,
+	.attr_is_visible	= iser_attr_is_visible,
 	.set_param		= iscsi_iser_set_param,
 	.get_conn_param		= iscsi_conn_get_param,
 	.get_ep_param		= iscsi_iser_get_ep_param,
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 7956a10f9488..e9c6a6047a00 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -63,6 +63,8 @@
 #ifdef CONFIG_MTRR
 #include <asm/mtrr.h>
 #endif
+#include <linux/kthread.h>
+#include <scsi/scsi_host.h>
 
 #include "mptbase.h"
 #include "lsi/mpi_log_fc.h"
@@ -323,6 +325,32 @@ mpt_is_discovery_complete(MPT_ADAPTER *ioc)
 	return rc;
 }
 
+
+/**
+ * mpt_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if controller is removed from pci subsystem.
+ * Return -1 for other case.
+ */
+static int mpt_remove_dead_ioc_func(void *arg)
+{
+	MPT_ADAPTER *ioc = (MPT_ADAPTER *)arg;
+	struct pci_dev *pdev;
+
+	if ((ioc == NULL))
+		return -1;
+
+	pdev = ioc->pcidev;
+	if ((pdev == NULL))
+		return -1;
+
+	pci_remove_bus_device(pdev);
+	return 0;
+}
+
+
+
 /**
  * mpt_fault_reset_work - work performed on workq after ioc fault
  * @work: input argument, used to derive ioc
@@ -336,12 +364,45 @@ mpt_fault_reset_work(struct work_struct *work)
 	u32	 ioc_raw_state;
 	int	 rc;
 	unsigned long	 flags;
+	MPT_SCSI_HOST	*hd;
+	struct task_struct *p;
 
 	if (ioc->ioc_reset_in_progress || !ioc->active)
 		goto out;
+
 	ioc_raw_state = mpt_GetIocState(ioc, 0);
-	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+	if ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_MASK) {
+		printk(MYIOC_s_INFO_FMT "%s: IOC is non-operational !!!!\n",
+		    ioc->name, __func__);
+
+		/*
+		 * Call mptscsih_flush_pending_cmds callback so that we
+		 * flush all pending commands back to OS.
+		 * This call is required to aovid deadlock at block layer.
+		 * Dead IOC will fail to do diag reset,and this call is safe
+		 * since dead ioc will never return any command back from HW.
+		 */
+		hd = shost_priv(ioc->sh);
+		ioc->schedule_dead_ioc_flush_running_cmds(hd);
+
+		/*Remove the Dead Host */
+		p = kthread_run(mpt_remove_dead_ioc_func, ioc,
+		    "mpt_dead_ioc_%d", ioc->id);
+		if (IS_ERR(p)) {
+			printk(MYIOC_s_ERR_FMT
+			    "%s: Running mpt_dead_ioc thread failed !\n",
+			    ioc->name, __func__);
+		} else {
+			printk(MYIOC_s_WARN_FMT
+			    "%s: Running mpt_dead_ioc thread success !\n",
+			    ioc->name, __func__);
+		}
+		return; /* don't rearm timer */
+	}
+
+	if ((ioc_raw_state & MPI_IOC_STATE_MASK)
+	    == MPI_IOC_STATE_FAULT) {
 		printk(MYIOC_s_WARN_FMT "IOC is in FAULT state (%04xh)!!!\n",
 		    ioc->name, ioc_raw_state & MPI_DOORBELL_DATA_MASK);
 		printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
@@ -6413,8 +6474,19 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
 		pReq->Action, ioc->mptbase_cmds.status, timeleft));
 		if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
 			goto out;
-		if (!timeleft)
+		if (!timeleft) {
+			spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+			if (ioc->ioc_reset_in_progress) {
+				spin_unlock_irqrestore(&ioc->taskmgmt_lock,
+					flags);
+				printk(MYIOC_s_INFO_FMT "%s: host reset in"
+					" progress mpt_config timed out.!!\n",
+					__func__, ioc->name);
+				return -EFAULT;
+			}
+			spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 			issue_hard_reset = 1;
+		}
 		goto out;
 	}
@@ -7128,7 +7200,18 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 	spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
 	if (ioc->ioc_reset_in_progress) {
 		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
-		return 0;
+		ioc->wait_on_reset_completion = 1;
+		do {
+			ssleep(1);
+		} while (ioc->ioc_reset_in_progress == 1);
+		ioc->wait_on_reset_completion = 0;
+		return ioc->reset_status;
+	}
+	if (ioc->wait_on_reset_completion) {
+		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+		rc = 0;
+		time_count = jiffies;
+		goto exit;
 	}
 	ioc->ioc_reset_in_progress = 1;
 	if (ioc->alt_ioc)
@@ -7165,6 +7248,7 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 	ioc->ioc_reset_in_progress = 0;
 	ioc->taskmgmt_quiesce_io = 0;
 	ioc->taskmgmt_in_progress = 0;
+	ioc->reset_status = rc;
 	if (ioc->alt_ioc) {
 		ioc->alt_ioc->ioc_reset_in_progress = 0;
 		ioc->alt_ioc->taskmgmt_quiesce_io = 0;
@@ -7180,7 +7264,7 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 			ioc->alt_ioc, MPT_IOC_POST_RESET);
 		}
 	}
-
+exit:
 	dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"HardResetHandler: completed (%d seconds): %s\n", ioc->name,
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index fe902338539b..b4d24dc081ae 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.19"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.19"
+#define MPT_LINUX_VERSION_COMMON	"3.04.20"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.20"
 #define WHAT_MAGIC_STRING	"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
@@ -554,10 +554,47 @@ struct mptfc_rport_info
 	u8		flags;
 };
 
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+/*
+ *	MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers
+ *	Private to the driver.
+ */
+
+#define MPT_HOST_BUS_UNKNOWN		(0xFF)
+#define MPT_HOST_TOO_MANY_TM		(0x05)
+#define MPT_HOST_NVRAM_INVALID		(0xFFFFFFFF)
+#define MPT_HOST_NO_CHAIN		(0xFFFFFFFF)
+#define MPT_NVRAM_MASK_TIMEOUT		(0x000000FF)
+#define MPT_NVRAM_SYNC_MASK		(0x0000FF00)
+#define MPT_NVRAM_SYNC_SHIFT		(8)
+#define MPT_NVRAM_DISCONNECT_ENABLE	(0x00010000)
+#define MPT_NVRAM_ID_SCAN_ENABLE	(0x00020000)
+#define MPT_NVRAM_LUN_SCAN_ENABLE	(0x00040000)
+#define MPT_NVRAM_TAG_QUEUE_ENABLE	(0x00080000)
+#define MPT_NVRAM_WIDE_DISABLE		(0x00100000)
+#define MPT_NVRAM_BOOT_CHOICE		(0x00200000)
+
+typedef enum {
+	FC,
+	SPI,
+	SAS
+} BUS_TYPE;
+
+typedef struct _MPT_SCSI_HOST {
+	struct _MPT_ADAPTER		 *ioc;
+	ushort			  sel_timeout[MPT_MAX_FC_DEVICES];
+	char			  *info_kbuf;
+	long			  last_queue_full;
+	u16			  spi_pending;
+	struct list_head	  target_reset_list;
+} MPT_SCSI_HOST;
+
 typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
 typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
 		dma_addr_t dma_addr);
 typedef void (*MPT_SCHEDULE_TARGET_RESET)(void *ioc);
+typedef void (*MPT_FLUSH_RUNNING_CMDS)(MPT_SCSI_HOST *hd);
 
 /*
  *  Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
@@ -716,7 +753,10 @@ typedef struct _MPT_ADAPTER
 	int			 taskmgmt_in_progress;
 	u8			 taskmgmt_quiesce_io;
 	u8			 ioc_reset_in_progress;
+	u8			 reset_status;
+	u8			 wait_on_reset_completion;
 	MPT_SCHEDULE_TARGET_RESET schedule_target_reset;
+	MPT_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
 	struct work_struct	 sas_persist_task;
 
 	struct work_struct	 fc_setup_reset_work;
@@ -830,19 +870,6 @@ typedef struct _MPT_LOCAL_REPLY {
 	u32 pad;
 } MPT_LOCAL_REPLY;
 
-#define MPT_HOST_BUS_UNKNOWN		(0xFF)
-#define MPT_HOST_TOO_MANY_TM		(0x05)
-#define MPT_HOST_NVRAM_INVALID		(0xFFFFFFFF)
-#define MPT_HOST_NO_CHAIN		(0xFFFFFFFF)
-#define MPT_NVRAM_MASK_TIMEOUT		(0x000000FF)
-#define MPT_NVRAM_SYNC_MASK		(0x0000FF00)
-#define MPT_NVRAM_SYNC_SHIFT		(8)
-#define MPT_NVRAM_DISCONNECT_ENABLE	(0x00010000)
-#define MPT_NVRAM_ID_SCAN_ENABLE	(0x00020000)
-#define MPT_NVRAM_LUN_SCAN_ENABLE	(0x00040000)
-#define MPT_NVRAM_TAG_QUEUE_ENABLE	(0x00080000)
-#define MPT_NVRAM_WIDE_DISABLE		(0x00100000)
-#define MPT_NVRAM_BOOT_CHOICE		(0x00200000)
 
 /* The TM_STATE variable is used to provide strict single threading of TM
  * requests as well as communicate TM error conditions.
@@ -851,21 +878,6 @@ typedef struct _MPT_LOCAL_REPLY {
 #define TM_STATE_IN_PROGRESS	(1)
 #define TM_STATE_ERROR		(2)
 
-typedef enum {
-	FC,
-	SPI,
-	SAS
-} BUS_TYPE;
-
-typedef struct _MPT_SCSI_HOST {
-	MPT_ADAPTER		 *ioc;
-	ushort			  sel_timeout[MPT_MAX_FC_DEVICES];
-	char			  *info_kbuf;
-	long			  last_queue_full;
-	u16			  spi_pending;
-	struct list_head	  target_reset_list;
-} MPT_SCSI_HOST;
-
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
  *	More Dynamic Multi-Pathing stuff...
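The mptbase.h hunk above adds the MPT_FLUSH_RUNNING_CMDS callback so the fault-reset work can return all outstanding I/O when an IOC goes dead; the mptsas.c diff below wires it to mptscsih_flush_running_cmds at probe time. A compact user-space sketch of that hookup follows, with simplified mock types; in the real driver the dead-IOC path additionally kthread_run()s mpt_remove_dead_ioc_func to detach the PCI device.

/* Illustrative mock of the dead-IOC flush-callback wiring; the struct
 * and function names are stand-ins for MPT_ADAPTER/MPT_SCSI_HOST. */
#include <stdio.h>

struct demo_host { int pending_cmds; };

struct demo_adapter {
	struct demo_host *sh;
	/* mirrors schedule_dead_ioc_flush_running_cmds */
	void (*flush_running_cmds)(struct demo_host *hd);
};

static void demo_flush_running_cmds(struct demo_host *hd)
{
	/* return every outstanding command to the midlayer so the
	 * block layer cannot deadlock waiting on a dead controller */
	printf("flushing %d pending commands\n", hd->pending_cmds);
	hd->pending_cmds = 0;
}

static void demo_fault_reset_work(struct demo_adapter *ioc)
{
	int ioc_dead = 1;	/* stand-in for the MPI_IOC_STATE_MASK check */

	if (ioc_dead) {
		ioc->flush_running_cmds(ioc->sh);
		/* real driver now spawns a kthread to remove the
		 * PCI device; omitted in this sketch */
	}
}

int main(void)
{
	struct demo_host hd = { .pending_cmds = 3 };
	struct demo_adapter ioc = {
		.sh = &hd,
		/* probe-time hookup, as mptsas_probe does below */
		.flush_running_cmds = demo_flush_running_cmds,
	};

	demo_fault_reset_work(&ioc);
	return 0;
}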
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c index 7596aecd5072..9d9504298549 100644 --- a/drivers/message/fusion/mptsas.c +++ b/drivers/message/fusion/mptsas.c @@ -92,6 +92,11 @@ static int max_lun = MPTSAS_MAX_LUN; module_param(max_lun, int, 0); MODULE_PARM_DESC(max_lun, " max lun, default=16895 "); +static int mpt_loadtime_max_sectors = 8192; +module_param(mpt_loadtime_max_sectors, int, 0); +MODULE_PARM_DESC(mpt_loadtime_max_sectors, + " Maximum sector define for Host Bus Adaptor.Range 64 to 8192 default=8192"); + static u8 mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS; static u8 mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */ @@ -285,10 +290,11 @@ mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, spin_lock_irqsave(&ioc->fw_event_lock, flags); list_add_tail(&fw_event->list, &ioc->fw_event_list); INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work); - devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n", - ioc->name, __func__, fw_event)); - queue_delayed_work(ioc->fw_event_q, &fw_event->work, - delay); + devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)" + "on cpuid %d\n", ioc->name, __func__, + fw_event, smp_processor_id())); + queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q, + &fw_event->work, delay); spin_unlock_irqrestore(&ioc->fw_event_lock, flags); } @@ -300,10 +306,11 @@ mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event, unsigned long flags; spin_lock_irqsave(&ioc->fw_event_lock, flags); devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task " - "(fw_event=0x%p)\n", ioc->name, __func__, fw_event)); + "(fw_event=0x%p)on cpuid %d\n", ioc->name, __func__, + fw_event, smp_processor_id())); fw_event->retries++; - queue_delayed_work(ioc->fw_event_q, &fw_event->work, - msecs_to_jiffies(delay)); + queue_delayed_work_on(smp_processor_id(), ioc->fw_event_q, + &fw_event->work, msecs_to_jiffies(delay)); spin_unlock_irqrestore(&ioc->fw_event_lock, flags); } @@ -1943,6 +1950,15 @@ static enum blk_eh_timer_return mptsas_eh_timed_out(struct scsi_cmnd *sc) goto done; } + /* In case if IOC is in reset from internal context. + * Do not execute EEH for the same IOC. SML should to reset timer. + */ + if (ioc->ioc_reset_in_progress) { + dtmprintk(ioc, printk(MYIOC_s_WARN_FMT ": %s: ioc is in reset," + "SML need to reset the timer (sc=%p)\n", + ioc->name, __func__, sc)); + rc = BLK_EH_RESET_TIMER; + } vdevice = sc->device->hostdata; if (vdevice && vdevice->vtarget && (vdevice->vtarget->inDMD || vdevice->vtarget->deleted)) { @@ -5142,6 +5158,8 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) ioc->TaskCtx = mptsasTaskCtx; ioc->InternalCtx = mptsasInternalCtx; ioc->schedule_target_reset = &mptsas_schedule_target_reset; + ioc->schedule_dead_ioc_flush_running_cmds = + &mptscsih_flush_running_cmds; /* Added sanity check on readiness of the MPT adapter. */ if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) { @@ -5239,6 +5257,21 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id) sh->sg_tablesize = numSGE; } + if (mpt_loadtime_max_sectors) { + if (mpt_loadtime_max_sectors < 64 || + mpt_loadtime_max_sectors > 8192) { + printk(MYIOC_s_INFO_FMT "Invalid value passed for" + "mpt_loadtime_max_sectors %d." 
+ "Range from 64 to 8192\n", ioc->name, + mpt_loadtime_max_sectors); + } + mpt_loadtime_max_sectors &= 0xFFFFFFFE; + dprintk(ioc, printk(MYIOC_s_DEBUG_FMT + "Resetting max sector to %d from %d\n", + ioc->name, mpt_loadtime_max_sectors, sh->max_sectors)); + sh->max_sectors = mpt_loadtime_max_sectors; + } + hd = shost_priv(sh); hd->ioc = ioc; diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index ce61a5769765..0c3ced70707b 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c @@ -830,7 +830,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr) if ((pScsiReq->CDB[0] == READ_6 && ((pScsiReq->CDB[1] & 0x02) == 0)) || pScsiReq->CDB[0] == READ_10 || pScsiReq->CDB[0] == READ_12 || - pScsiReq->CDB[0] == READ_16 || + (pScsiReq->CDB[0] == READ_16 && + ((pScsiReq->CDB[1] & 0x02) == 0)) || pScsiReq->CDB[0] == VERIFY || pScsiReq->CDB[0] == VERIFY_16) { if (scsi_bufflen(sc) != @@ -1024,7 +1025,7 @@ out: * * Must be called while new I/Os are being queued. */ -static void +void mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) { MPT_ADAPTER *ioc = hd->ioc; @@ -1055,6 +1056,7 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd) sc->scsi_done(sc); } } +EXPORT_SYMBOL(mptscsih_flush_running_cmds); /* * mptscsih_search_running_cmds - Delete any commands associated @@ -1629,7 +1631,13 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, return 0; } - if (ioc_raw_state & MPI_DOORBELL_ACTIVE) { + /* DOORBELL ACTIVE check is not required if + * MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q is supported. + */ + + if (!((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) + && (ioc->facts.MsgVersion >= MPI_VERSION_01_05)) && + (ioc_raw_state & MPI_DOORBELL_ACTIVE)) { printk(MYIOC_s_WARN_FMT "TaskMgmt type=%x: ioc_state: " "DOORBELL_ACTIVE (0x%x)!\n", @@ -1728,7 +1736,9 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!! doorbell=0x%08x\n", ioc->name, __func__, mpt_GetIocState(ioc, 0)); - retval = mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); + retval = (ioc->bus_type == SAS) ? 
+ mpt_HardResetHandler(ioc, CAN_SLEEP) : + mpt_Soft_Hard_ResetHandler(ioc, CAN_SLEEP); mpt_free_msg_frame(ioc, mf); } diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h index 45a5ff3eff61..43e75ff39921 100644 --- a/drivers/message/fusion/mptscsih.h +++ b/drivers/message/fusion/mptscsih.h @@ -135,3 +135,4 @@ extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id); extern struct device_attribute *mptscsih_host_attrs[]; extern struct scsi_cmnd *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i); extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code); +extern void mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd); diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 9a122280246c..6547ff469410 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c @@ -160,7 +160,8 @@ again: DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, q->irq_ptr->int_parm); + q->nr, q->first_to_kick, count, + q->irq_ptr->int_parm); return 0; } return count - tmp_count; @@ -206,7 +207,8 @@ again: DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, q->irq_ptr->int_parm); + q->nr, q->first_to_kick, count, + q->irq_ptr->int_parm); return 0; } WARN_ON(tmp_count); @@ -1070,6 +1072,7 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, { struct qdio_irq *irq_ptr = cdev->private->qdio_data; struct qdio_q *q; + int count; DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); DBF_ERROR("intp :%lx", intparm); @@ -1083,8 +1086,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, dump_stack(); goto no_handler; } + + count = sub_buf(q->first_to_check, q->first_to_kick); q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION, - 0, -1, -1, irq_ptr->int_parm); + q->nr, q->first_to_kick, count, irq_ptr->int_parm); no_handler: qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED); } diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index dd8bd670a6b8..d9a46a429bcc 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c @@ -381,6 +381,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr, int i; irq_ptr->qdr->qfmt = qdio_init->q_format; + irq_ptr->qdr->ac = qdio_init->qdr_ac; irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs; irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs; irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */ diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 96d1462e0bf5..967e7b70e977 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c @@ -163,6 +163,42 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req) spin_unlock_irqrestore(&dbf->hba_lock, flags); } +/** + * zfcp_dbf_hba_def_err - trace event for deferred error messages + * @adapter: pointer to struct zfcp_adapter + * @req_id: request id which caused the deferred error message + * @scount: number of sbals incl. 
the signaling sbal + * @pl: array of all involved sbals + */ +void zfcp_dbf_hba_def_err(struct zfcp_adapter *adapter, u64 req_id, u16 scount, + void **pl) +{ + struct zfcp_dbf *dbf = adapter->dbf; + struct zfcp_dbf_pay *payload = &dbf->pay_buf; + unsigned long flags; + u16 length; + + if (!pl) + return; + + spin_lock_irqsave(&dbf->pay_lock, flags); + memset(payload, 0, sizeof(*payload)); + + memcpy(payload->area, "def_err", 7); + payload->fsf_req_id = req_id; + payload->counter = 0; + length = min((u16)sizeof(struct qdio_buffer), + (u16)ZFCP_DBF_PAY_MAX_REC); + + while ((char *)pl[payload->counter] && payload->counter < scount) { + memcpy(payload->data, (char *)pl[payload->counter], length); + debug_event(dbf->pay, 1, payload, zfcp_dbf_plen(length)); + payload->counter++; + } + + spin_unlock_irqrestore(&dbf->pay_lock, flags); +} + static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec, struct zfcp_adapter *adapter, struct zfcp_port *port, diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 527ba48eea57..ed5d921e82cd 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h @@ -72,6 +72,7 @@ struct zfcp_reqlist; #define ZFCP_STATUS_COMMON_NOESC 0x00200000 /* adapter status */ +#define ZFCP_STATUS_ADAPTER_MB_ACT 0x00000001 #define ZFCP_STATUS_ADAPTER_QDIOUP 0x00000002 #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED 0x00000004 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK 0x00000008 @@ -314,4 +315,10 @@ struct zfcp_fsf_req { void (*handler)(struct zfcp_fsf_req *); }; +static inline +int zfcp_adapter_multi_buffer_active(struct zfcp_adapter *adapter) +{ + return atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_MB_ACT; +} + #endif /* ZFCP_DEF_H */ diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index 03627cfd81cd..2302e1cfb76c 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h @@ -53,6 +53,7 @@ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *); +extern void zfcp_dbf_hba_def_err(struct zfcp_adapter *, u64, u16, void **); extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32); extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *); extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *); diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 022fb6a8cb83..e9a787e2e6a5 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -936,39 +936,47 @@ static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, struct scatterlist *sg_resp) { struct zfcp_adapter *adapter = req->adapter; + struct zfcp_qdio *qdio = adapter->qdio; + struct fsf_qtcb *qtcb = req->qtcb; u32 feat = adapter->adapter_features; - int bytes; - if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) { - if (!zfcp_qdio_sg_one_sbale(sg_req) || - !zfcp_qdio_sg_one_sbale(sg_resp)) - return -EOPNOTSUPP; + if (zfcp_adapter_multi_buffer_active(adapter)) { + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) + return -EIO; + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) + return -EIO; - zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req, - sg_req, sg_resp); + zfcp_qdio_set_data_div(qdio, &req->qdio_req, + zfcp_qdio_sbale_count(sg_req)); + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); + zfcp_qdio_set_scount(qdio, &req->qdio_req); return 0; } /* 
use single, unchained SBAL if it can hold the request */ if (zfcp_qdio_sg_one_sbale(sg_req) && zfcp_qdio_sg_one_sbale(sg_resp)) { - zfcp_fsf_setup_ct_els_unchained(adapter->qdio, &req->qdio_req, + zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req, sg_req, sg_resp); return 0; } - bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, sg_req); - if (bytes <= 0) + if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) + return -EOPNOTSUPP; + + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) return -EIO; - zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); - req->qtcb->bottom.support.req_buf_length = bytes; - zfcp_qdio_skip_to_last_sbale(&req->qdio_req); - bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->qdio_req, - sg_resp); - req->qtcb->bottom.support.resp_buf_length = bytes; - if (bytes <= 0) + qtcb->bottom.support.req_buf_length = zfcp_qdio_real_bytes(sg_req); + + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); + zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req); + + if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) return -EIO; - zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); + + qtcb->bottom.support.resp_buf_length = zfcp_qdio_real_bytes(sg_resp); + + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); return 0; } @@ -1119,7 +1127,8 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id, req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; - zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); + if (!zfcp_adapter_multi_buffer_active(adapter)) + zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); @@ -2162,7 +2171,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) struct zfcp_fsf_req *req; struct fcp_cmnd *fcp_cmnd; u8 sbtype = SBAL_SFLAGS0_TYPE_READ; - int real_bytes, retval = -EIO, dix_bytes = 0; + int retval = -EIO; struct scsi_device *sdev = scsi_cmnd->device; struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); struct zfcp_adapter *adapter = zfcp_sdev->port->adapter; @@ -2207,7 +2216,8 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) io->ref_tag_value = scsi_get_lba(scsi_cmnd) & 0xFFFFFFFF; } - zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction); + if (zfcp_fsf_set_data_dir(scsi_cmnd, &io->data_direction)) + goto failed_scsi_cmnd; fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; zfcp_fc_scsi_to_fcp(fcp_cmnd, scsi_cmnd, 0); @@ -2215,18 +2225,22 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd) if (scsi_prot_sg_count(scsi_cmnd)) { zfcp_qdio_set_data_div(qdio, &req->qdio_req, scsi_prot_sg_count(scsi_cmnd)); - dix_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, + retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, + scsi_prot_sglist(scsi_cmnd)); + if (retval) + goto failed_scsi_cmnd; + io->prot_data_length = zfcp_qdio_real_bytes( scsi_prot_sglist(scsi_cmnd)); - io->prot_data_length = dix_bytes; } - real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, - scsi_sglist(scsi_cmnd)); - - if (unlikely(real_bytes < 0) || unlikely(dix_bytes < 0)) + retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, + scsi_sglist(scsi_cmnd)); + if (unlikely(retval)) goto failed_scsi_cmnd; zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); + if (zfcp_adapter_multi_buffer_active(adapter)) + zfcp_qdio_set_scount(qdio, &req->qdio_req); retval = zfcp_fsf_req_send(req); if (unlikely(retval)) @@ -2328,7 +2342,7 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, struct zfcp_qdio *qdio = adapter->qdio; struct zfcp_fsf_req *req = NULL; struct 
fsf_qtcb_bottom_support *bottom; - int retval = -EIO, bytes; + int retval = -EIO; u8 direction; if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) @@ -2361,13 +2375,17 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter, bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE; bottom->option = fsf_cfdc->option; - bytes = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg); + retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, fsf_cfdc->sg); - if (bytes != ZFCP_CFDC_MAX_SIZE) { + if (retval || + (zfcp_qdio_real_bytes(fsf_cfdc->sg) != ZFCP_CFDC_MAX_SIZE)) { zfcp_fsf_req_free(req); + retval = -EIO; goto out; } - zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); + zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); + if (zfcp_adapter_multi_buffer_active(adapter)) + zfcp_qdio_set_scount(qdio, &req->qdio_req); zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); retval = zfcp_fsf_req_send(req); diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index d9c40ea73eef..df9e69f54742 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -15,6 +15,10 @@ #define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer)) +static bool enable_multibuffer; +module_param_named(datarouter, enable_multibuffer, bool, 0400); +MODULE_PARM_DESC(datarouter, "Enable hardware data router support"); + static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal) { int pos; @@ -37,8 +41,11 @@ static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id, dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n"); - if (qdio_err & QDIO_ERROR_SLSB_STATE) + if (qdio_err & QDIO_ERROR_SLSB_STATE) { zfcp_qdio_siosl(adapter); + zfcp_erp_adapter_shutdown(adapter, 0, id); + return; + } zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED | ZFCP_STATUS_COMMON_ERP_FAILED, id); @@ -93,9 +100,27 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err, unsigned long parm) { struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm; - int sbal_idx, sbal_no; + struct zfcp_adapter *adapter = qdio->adapter; + struct qdio_buffer_element *sbale; + int sbal_no, sbal_idx; + void *pl[ZFCP_QDIO_MAX_SBALS_PER_REQ + 1]; + u64 req_id; + u8 scount; if (unlikely(qdio_err)) { + memset(pl, 0, ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *)); + if (zfcp_adapter_multi_buffer_active(adapter)) { + sbale = qdio->res_q[idx]->element; + req_id = (u64) sbale->addr; + scount = sbale->scount + 1; /* incl. 
signaling SBAL */ + + for (sbal_no = 0; sbal_no < scount; sbal_no++) { + sbal_idx = (idx + sbal_no) % + QDIO_MAX_BUFFERS_PER_Q; + pl[sbal_no] = qdio->res_q[sbal_idx]; + } + zfcp_dbf_hba_def_err(adapter, req_id, scount, pl); + } zfcp_qdio_handler_error(qdio, "qdires1", qdio_err); return; } @@ -155,7 +180,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) static struct qdio_buffer_element * zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) { - if (q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL) + if (q_req->sbale_curr == qdio->max_sbale_per_sbal - 1) return zfcp_qdio_sbal_chain(qdio, q_req); q_req->sbale_curr++; return zfcp_qdio_sbale_curr(qdio, q_req); @@ -167,13 +192,12 @@ zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) * @q_req: pointer to struct zfcp_qdio_req * @sg: scatter-gather list * @max_sbals: upper bound for number of SBALs to be used - * Returns: number of bytes, or error (negativ) + * Returns: zero or -EINVAL on error */ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, struct scatterlist *sg) { struct qdio_buffer_element *sbale; - int bytes = 0; /* set storage-block type for this request */ sbale = zfcp_qdio_sbale_req(qdio, q_req); @@ -187,14 +211,10 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, q_req->sbal_number); return -EINVAL; } - sbale->addr = sg_virt(sg); sbale->length = sg->length; - - bytes += sg->length; } - - return bytes; + return 0; } static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) @@ -283,6 +303,8 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id, memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8); ASCEBC(id->adapter_name, 8); id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV; + if (enable_multibuffer) + id->qdr_ac |= QDR_AC_MULTI_BUFFER_ENABLE; id->no_input_qs = 1; id->no_output_qs = 1; id->input_handler = zfcp_qdio_int_resp; @@ -378,6 +400,17 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) atomic_set_mask(ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED, &qdio->adapter->status); + if (ssqd.qdioac2 & CHSC_AC2_MULTI_BUFFER_ENABLED) { + atomic_set_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); + qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER; + } else { + atomic_clear_mask(ZFCP_STATUS_ADAPTER_MB_ACT, &adapter->status); + qdio->max_sbale_per_sbal = QDIO_MAX_ELEMENTS_PER_BUFFER - 1; + } + + qdio->max_sbale_per_req = + ZFCP_QDIO_MAX_SBALS_PER_REQ * qdio->max_sbale_per_sbal + - 2; if (qdio_activate(cdev)) goto failed_qdio; @@ -397,6 +430,11 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio) atomic_set(&qdio->req_q_free, QDIO_MAX_BUFFERS_PER_Q); atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status); + if (adapter->scsi_host) { + adapter->scsi_host->sg_tablesize = qdio->max_sbale_per_req; + adapter->scsi_host->max_sectors = qdio->max_sbale_per_req * 8; + } + return 0; failed_qdio: diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h index 54e22ace012b..8ac7f5342d29 100644 --- a/drivers/s390/scsi/zfcp_qdio.h +++ b/drivers/s390/scsi/zfcp_qdio.h @@ -13,20 +13,9 @@ #define ZFCP_QDIO_SBALE_LEN PAGE_SIZE -/* DMQ bug workaround: don't use last SBALE */ -#define ZFCP_QDIO_MAX_SBALES_PER_SBAL (QDIO_MAX_ELEMENTS_PER_BUFFER - 1) - -/* index of last SBALE (with respect to DMQ bug workaround) */ -#define ZFCP_QDIO_LAST_SBALE_PER_SBAL (ZFCP_QDIO_MAX_SBALES_PER_SBAL - 1) - /* Max SBALS for chaining */ #define ZFCP_QDIO_MAX_SBALS_PER_REQ 36 -/* max. 
number of (data buffer) SBALEs in largest SBAL chain - * request ID + QTCB in SBALE 0 + 1 of first SBAL in chain */ -#define ZFCP_QDIO_MAX_SBALES_PER_REQ \ - (ZFCP_QDIO_MAX_SBALS_PER_REQ * ZFCP_QDIO_MAX_SBALES_PER_SBAL - 2) - /** * struct zfcp_qdio - basic qdio data structure * @res_q: response queue @@ -53,6 +42,8 @@ struct zfcp_qdio { atomic_t req_q_full; wait_queue_head_t req_q_wq; struct zfcp_adapter *adapter; + u16 max_sbale_per_sbal; + u16 max_sbale_per_req; }; /** @@ -155,7 +146,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, { struct qdio_buffer_element *sbale; - BUG_ON(q_req->sbale_curr == ZFCP_QDIO_LAST_SBALE_PER_SBAL); + BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1); q_req->sbale_curr++; sbale = zfcp_qdio_sbale_curr(qdio, q_req); sbale->addr = data; @@ -195,9 +186,10 @@ int zfcp_qdio_sg_one_sbale(struct scatterlist *sg) * @q_req: The current zfcp_qdio_req */ static inline -void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio_req *q_req) +void zfcp_qdio_skip_to_last_sbale(struct zfcp_qdio *qdio, + struct zfcp_qdio_req *q_req) { - q_req->sbale_curr = ZFCP_QDIO_LAST_SBALE_PER_SBAL; + q_req->sbale_curr = qdio->max_sbale_per_sbal - 1; } /** @@ -228,8 +220,52 @@ void zfcp_qdio_set_data_div(struct zfcp_qdio *qdio, { struct qdio_buffer_element *sbale; - sbale = &qdio->req_q[q_req->sbal_first]->element[0]; + sbale = qdio->req_q[q_req->sbal_first]->element; sbale->length = count; } +/** + * zfcp_qdio_sbale_count - count sbale used + * @sg: pointer to struct scatterlist + */ +static inline +unsigned int zfcp_qdio_sbale_count(struct scatterlist *sg) +{ + unsigned int count = 0; + + for (; sg; sg = sg_next(sg)) + count++; + + return count; +} + +/** + * zfcp_qdio_real_bytes - count bytes used + * @sg: pointer to struct scatterlist + */ +static inline +unsigned int zfcp_qdio_real_bytes(struct scatterlist *sg) +{ + unsigned int real_bytes = 0; + + for (; sg; sg = sg_next(sg)) + real_bytes += sg->length; + + return real_bytes; +} + +/** + * zfcp_qdio_set_scount - set SBAL count value + * @qdio: pointer to struct zfcp_qdio + * @q_req: The current zfcp_qdio_req + */ +static inline +void zfcp_qdio_set_scount(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req) +{ + struct qdio_buffer_element *sbale; + + sbale = qdio->req_q[q_req->sbal_first]->element; + sbale->scount = q_req->sbal_number - 1; +} + #endif /* ZFCP_QDIO_H */ diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 7cac873c7383..09126a9d62ff 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c @@ -24,11 +24,8 @@ module_param_named(queue_depth, default_depth, uint, 0600); MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices"); static bool enable_dif; - -#ifdef CONFIG_ZFCP_DIF -module_param_named(dif, enable_dif, bool, 0600); +module_param_named(dif, enable_dif, bool, 0400); MODULE_PARM_DESC(dif, "Enable DIF/DIX data integrity support"); -#endif static bool allow_lun_scan = 1; module_param(allow_lun_scan, bool, 0600); @@ -309,8 +306,8 @@ static struct scsi_host_template zfcp_scsi_host_template = { .proc_name = "zfcp", .can_queue = 4096, .this_id = -1, - .sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ, - .max_sectors = (ZFCP_QDIO_MAX_SBALES_PER_REQ * 8), + .sg_tablesize = 1, /* adjusted later */ + .max_sectors = 8, /* adjusted later */ .dma_boundary = ZFCP_QDIO_SBALE_LEN - 1, .cmd_per_lun = 1, .use_clustering = 1, @@ -668,9 +665,9 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter) adapter->adapter_features & 
FSF_FEATURE_DIX_PROT_TCPIP) { mask |= SHOST_DIX_TYPE1_PROTECTION; scsi_host_set_guard(shost, SHOST_DIX_GUARD_IP); - shost->sg_prot_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; - shost->sg_tablesize = ZFCP_QDIO_MAX_SBALES_PER_REQ / 2; - shost->max_sectors = ZFCP_QDIO_MAX_SBALES_PER_REQ * 8 / 2; + shost->sg_prot_tablesize = adapter->qdio->max_sbale_per_req / 2; + shost->sg_tablesize = adapter->qdio->max_sbale_per_req / 2; + shost->max_sectors = shost->sg_tablesize * 8; } scsi_host_set_prot(shost, mask); diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 3878b7395081..aa573c39f596 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -309,6 +309,7 @@ config SCSI_FC_TGT_ATTRS config SCSI_ISCSI_ATTRS tristate "iSCSI Transport Attributes" depends on SCSI && NET + select BLK_DEV_BSGLIB help If you wish to export transport-specific information about each attached iSCSI device to sysfs, say Y. @@ -559,6 +560,15 @@ source "drivers/scsi/aic7xxx/Kconfig.aic79xx" source "drivers/scsi/aic94xx/Kconfig" source "drivers/scsi/mvsas/Kconfig" +config SCSI_MVUMI + tristate "Marvell UMI driver" + depends on SCSI && PCI + help + Module for Marvell Universal Message Interface(UMI) driver + + To compile this driver as a module, choose M here: the + module will be called mvumi. + config SCSI_DPT_I2O tristate "Adaptec I2O RAID support " depends on SCSI && PCI && VIRT_TO_BUS @@ -1872,10 +1882,6 @@ config ZFCP called zfcp. If you want to compile it as a module, say M here and read <file:Documentation/kbuild/modules.txt>. -config ZFCP_DIF - tristate "T10 DIF/DIX support for the zfcp driver (EXPERIMENTAL)" - depends on ZFCP && EXPERIMENTAL - config SCSI_PMCRAID tristate "PMC SIERRA Linux MaxRAID adapter support" depends on PCI && SCSI && NET diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 6153a66a8a31..2b887498be50 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -134,6 +134,7 @@ obj-$(CONFIG_SCSI_IBMVFC) += ibmvscsi/ obj-$(CONFIG_SCSI_HPTIOP) += hptiop.o obj-$(CONFIG_SCSI_STEX) += stex.o obj-$(CONFIG_SCSI_MVSAS) += mvsas/ +obj-$(CONFIG_SCSI_MVUMI) += mvumi.o obj-$(CONFIG_PS3_ROM) += ps3rom.o obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ obj-$(CONFIG_SCSI_CXGB4_ISCSI) += libiscsi.o libiscsi_tcp.o cxgbi/ diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 3382475dc22d..4aa76d6f11df 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c @@ -894,16 +894,17 @@ static ssize_t aac_show_serial_number(struct device *device, int len = 0; if (le32_to_cpu(dev->adapter_info.serial[0]) != 0xBAD0) - len = snprintf(buf, PAGE_SIZE, "%06X\n", + len = snprintf(buf, 16, "%06X\n", le32_to_cpu(dev->adapter_info.serial[0])); if (len && !memcmp(&dev->supplement_adapter_info.MfgPcbaSerialNo[ sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo)-len], buf, len-1)) - len = snprintf(buf, PAGE_SIZE, "%.*s\n", + len = snprintf(buf, 16, "%.*s\n", (int)sizeof(dev->supplement_adapter_info.MfgPcbaSerialNo), dev->supplement_adapter_info.MfgPcbaSerialNo); - return len; + + return min(len, 16); } static ssize_t aac_show_max_channel(struct device *device, diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index 29593275201a..fdac7c2fef37 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -906,6 +906,7 @@ int asd_control_phy(struct asd_sas_phy *phy, enum phy_func func, void *arg) switch (func) { case PHY_FUNC_CLEAR_ERROR_LOG: + case PHY_FUNC_GET_EVENTS: return 
-ENOSYS; case PHY_FUNC_SET_LINK_RATE: rates = arg; diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c index b8a82f2c62c8..cdb15364bc69 100644 --- a/drivers/scsi/be2iscsi/be_cmds.c +++ b/drivers/scsi/be2iscsi/be_cmds.c @@ -660,6 +660,7 @@ int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba, spin_lock(&phba->ctrl.mbox_lock); ctrl = &phba->ctrl; wrb = wrb_from_mbox(&ctrl->mbox_mem); + memset(wrb, 0, sizeof(*wrb)); req = embedded_payload(wrb); ctxt = &req->context; @@ -868,3 +869,22 @@ error: beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL); return status; } + +int beiscsi_cmd_reset_function(struct beiscsi_hba *phba) +{ + struct be_ctrl_info *ctrl = &phba->ctrl; + struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem); + struct be_post_sgl_pages_req *req = embedded_payload(wrb); + int status; + + spin_lock(&ctrl->mbox_lock); + + req = embedded_payload(wrb); + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, + OPCODE_COMMON_FUNCTION_RESET, sizeof(*req)); + status = be_mbox_notify_wait(phba); + + spin_unlock(&ctrl->mbox_lock); + return status; +} diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h index 497eb29e5c9e..8b40a5b4366c 100644 --- a/drivers/scsi/be2iscsi/be_cmds.h +++ b/drivers/scsi/be2iscsi/be_cmds.h @@ -561,6 +561,8 @@ int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, u32 page_offset, u32 num_pages); +int beiscsi_cmd_reset_function(struct beiscsi_hba *phba); + int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem, struct be_queue_info *wrbq); diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c index 3cad10605023..8b002f6db6ca 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.c +++ b/drivers/scsi/be2iscsi/be_iscsi.c @@ -177,9 +177,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session, { struct iscsi_conn *conn = cls_conn->dd_data; struct beiscsi_conn *beiscsi_conn = conn->dd_data; - struct Scsi_Host *shost = - (struct Scsi_Host *)iscsi_session_to_shost(cls_session); - struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); + struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); + struct beiscsi_hba *phba = iscsi_host_priv(shost); struct beiscsi_endpoint *beiscsi_ep; struct iscsi_endpoint *ep; @@ -290,7 +289,7 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn, int beiscsi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { - struct beiscsi_hba *phba = (struct beiscsi_hba *)iscsi_host_priv(shost); + struct beiscsi_hba *phba = iscsi_host_priv(shost); int status = 0; SE_DEBUG(DBG_LVL_8, "In beiscsi_get_host_param, param= %d\n", param); @@ -733,3 +732,56 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep) beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid); iscsi_destroy_endpoint(beiscsi_ep->openiscsi_ep); } + +mode_t be2iscsi_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: 
+ case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h index ff60b7fd92d6..4a1f2e393f31 100644 --- a/drivers/scsi/be2iscsi/be_iscsi.h +++ b/drivers/scsi/be2iscsi/be_iscsi.h @@ -26,6 +26,8 @@ #define BE2_IPV4 0x1 #define BE2_IPV6 0x10 +mode_t be2iscsi_attr_is_visible(int param_type, int param); + void beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn, struct beiscsi_offload_params *params); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 0a9bdfa3d939..7b0a8ab71049 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -822,33 +822,47 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba) struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; int ret, msix_vec, i, j; - char desc[32]; phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; if (phba->msix_enabled) { for (i = 0; i < phba->num_cpus; i++) { - sprintf(desc, "beiscsi_msix_%04x", i); + phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, + GFP_KERNEL); + if (!phba->msi_name[i]) { + ret = -ENOMEM; + goto free_msix_irqs; + } + + sprintf(phba->msi_name[i], "beiscsi_%02x_%02x", + phba->shost->host_no, i); msix_vec = phba->msix_entries[i].vector; - ret = request_irq(msix_vec, be_isr_msix, 0, desc, + ret = request_irq(msix_vec, be_isr_msix, 0, + phba->msi_name[i], &phwi_context->be_eq[i]); if (ret) { shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-Failed to" "register msix for i = %d\n", i); - if (!i) - return ret; + kfree(phba->msi_name[i]); goto free_msix_irqs; } } + phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL); + if (!phba->msi_name[i]) { + ret = -ENOMEM; + goto free_msix_irqs; + } + sprintf(phba->msi_name[i], "beiscsi_mcc_%02x", + phba->shost->host_no); msix_vec = phba->msix_entries[i].vector; - ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc", + ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i], &phwi_context->be_eq[i]); if (ret) { shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-" "Failed to register beiscsi_msix_mcc\n"); - i++; + kfree(phba->msi_name[i]); goto free_msix_irqs; } @@ -863,8 +877,11 @@ static int beiscsi_init_irqs(struct beiscsi_hba *phba) } return 0; free_msix_irqs: - for (j = i - 1; j == 0; j++) + for (j = i - 1; j >= 0; j--) { + kfree(phba->msi_name[j]); + msix_vec = phba->msix_entries[j].vector; free_irq(msix_vec, &phwi_context->be_eq[j]); + } return ret; } @@ -1106,7 +1123,12 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn, & SOL_STS_MASK) >> 8); flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32] & SOL_FLAGS_MASK) >> 24) | 0x80; + if (!task->sc) { + if (io_task->scsi_cmnd) + scsi_dma_unmap(io_task->scsi_cmnd); + return; + } task->sc->result = (DID_OK << 16) | status; if (rsp != ISCSI_STATUS_CMD_COMPLETED) { 
task->sc->result = DID_ERROR << 16; @@ -4027,11 +4049,11 @@ static int beiscsi_mtask(struct iscsi_task *task) TGT_DM_CMD); AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 0); - AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); } else { AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb, INI_RD_CMD); - AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1); + AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0); } hwi_write_buffer(pwrb, task); break; @@ -4102,9 +4124,8 @@ static int beiscsi_task_xmit(struct iscsi_task *task) return beiscsi_iotask(task, sg, num_sg, xferlen, writedir); } -static void beiscsi_remove(struct pci_dev *pcidev) +static void beiscsi_quiesce(struct beiscsi_hba *phba) { - struct beiscsi_hba *phba = NULL; struct hwi_controller *phwi_ctrlr; struct hwi_context_memory *phwi_context; struct be_eq_obj *pbe_eq; @@ -4112,12 +4133,6 @@ static void beiscsi_remove(struct pci_dev *pcidev) u8 *real_offset = 0; u32 value = 0; - phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); - if (!phba) { - dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); - return; - } - phwi_ctrlr = phba->phwi_ctrlr; phwi_context = phwi_ctrlr->phwi_ctxt; hwi_disable_intr(phba); @@ -4125,6 +4140,7 @@ static void beiscsi_remove(struct pci_dev *pcidev) for (i = 0; i <= phba->num_cpus; i++) { msix_vec = phba->msix_entries[i].vector; free_irq(msix_vec, &phwi_context->be_eq[i]); + kfree(phba->msi_name[i]); } } else if (phba->pcidev->irq) @@ -4152,10 +4168,40 @@ static void beiscsi_remove(struct pci_dev *pcidev) phba->ctrl.mbox_mem_alloced.size, phba->ctrl.mbox_mem_alloced.va, phba->ctrl.mbox_mem_alloced.dma); +} + +static void beiscsi_remove(struct pci_dev *pcidev) +{ + + struct beiscsi_hba *phba = NULL; + + phba = pci_get_drvdata(pcidev); + if (!phba) { + dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n"); + return; + } + + beiscsi_quiesce(phba); iscsi_boot_destroy_kset(phba->boot_kset); iscsi_host_remove(phba->shost); pci_dev_put(phba->pcidev); iscsi_host_free(phba->shost); + pci_disable_device(pcidev); +} + +static void beiscsi_shutdown(struct pci_dev *pcidev) +{ + + struct beiscsi_hba *phba = NULL; + + phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev); + if (!phba) { + dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n"); + return; + } + + beiscsi_quiesce(phba); + pci_disable_device(pcidev); } static void beiscsi_msix_enable(struct beiscsi_hba *phba) @@ -4235,7 +4281,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev, gcrashmode++; shost_printk(KERN_ERR, phba->shost, "Loading Driver in crashdump mode\n"); - ret = beiscsi_pci_soft_reset(phba); + ret = beiscsi_cmd_reset_function(phba); if (ret) { shost_printk(KERN_ERR, phba->shost, "Reset Failed. 
Aborting Crashdump\n"); @@ -4364,37 +4410,12 @@ struct iscsi_transport beiscsi_iscsi_transport = { .name = DRV_NAME, .caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO | CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD, - .param_mask = ISCSI_MAX_RECV_DLENGTH | - ISCSI_MAX_XMIT_DLENGTH | - ISCSI_HDRDGST_EN | - ISCSI_DATADGST_EN | - ISCSI_INITIAL_R2T_EN | - ISCSI_MAX_R2T | - ISCSI_IMM_DATA_EN | - ISCSI_FIRST_BURST | - ISCSI_MAX_BURST | - ISCSI_PDU_INORDER_EN | - ISCSI_DATASEQ_INORDER_EN | - ISCSI_ERL | - ISCSI_CONN_PORT | - ISCSI_CONN_ADDRESS | - ISCSI_EXP_STATSN | - ISCSI_PERSISTENT_PORT | - ISCSI_PERSISTENT_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT | - ISCSI_USERNAME | ISCSI_PASSWORD | - ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | - ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO | - ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, - .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | - ISCSI_HOST_INITIATOR_NAME, .create_session = beiscsi_session_create, .destroy_session = beiscsi_session_destroy, .create_conn = beiscsi_conn_create, .bind_conn = beiscsi_conn_bind, .destroy_conn = iscsi_conn_teardown, + .attr_is_visible = be2iscsi_attr_is_visible, .set_param = beiscsi_set_param, .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, @@ -4418,6 +4439,7 @@ static struct pci_driver beiscsi_pci_driver = { .name = DRV_NAME, .probe = beiscsi_dev_probe, .remove = beiscsi_remove, + .shutdown = beiscsi_shutdown, .id_table = beiscsi_pci_id_table }; diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h index 5ce5170254ca..b4a06d5e5f9e 100644 --- a/drivers/scsi/be2iscsi/be_main.h +++ b/drivers/scsi/be2iscsi/be_main.h @@ -34,7 +34,7 @@ #include "be.h" #define DRV_NAME "be2iscsi" -#define BUILD_STR "2.103.298.0" +#define BUILD_STR "4.1.239.0" #define BE_NAME "ServerEngines BladeEngine2" \ "Linux iSCSI Driver version" BUILD_STR #define DRV_DESC BE_NAME " " "Driver" @@ -162,6 +162,8 @@ do { \ #define PAGES_REQUIRED(x) \ ((x < PAGE_SIZE) ? 
1 : ((x + PAGE_SIZE - 1) / PAGE_SIZE)) +#define BEISCSI_MSI_NAME 20 /* size of msi_name string */ + enum be_mem_enum { HWI_MEM_ADDN_CONTEXT, HWI_MEM_WRB, @@ -287,6 +289,7 @@ struct beiscsi_hba { unsigned int num_cpus; unsigned int nxt_cqid; struct msix_entry msix_entries[MAX_CPUS + 1]; + char *msi_name[MAX_CPUS + 1]; bool msix_enabled; struct be_mem_descriptor *init_mem; diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h index dd335a2a797b..63de1c7cd0cb 100644 --- a/drivers/scsi/bnx2fc/bnx2fc.h +++ b/drivers/scsi/bnx2fc/bnx2fc.h @@ -62,7 +62,7 @@ #include "bnx2fc_constants.h" #define BNX2FC_NAME "bnx2fc" -#define BNX2FC_VERSION "1.0.4" +#define BNX2FC_VERSION "1.0.8" #define PFX "bnx2fc: " @@ -224,6 +224,7 @@ struct bnx2fc_interface { struct fcoe_ctlr ctlr; u8 vlan_enabled; int vlan_id; + bool enabled; }; #define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr) diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c index d66dcbd0df10..fd382fe33f6e 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_els.c +++ b/drivers/scsi/bnx2fc/bnx2fc_els.c @@ -391,18 +391,6 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg) BNX2FC_IO_DBG(rec_req, "rec_compl: orig xid = 0x%x", orig_io_req->xid); tgt = orig_io_req->tgt; - if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) { - BNX2FC_IO_DBG(rec_req, "completed" - "orig_io - 0x%x\n", - orig_io_req->xid); - goto rec_compl_done; - } - if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { - BNX2FC_IO_DBG(rec_req, "abts in prog " - "orig_io - 0x%x\n", - orig_io_req->xid); - goto rec_compl_done; - } /* Handle REC timeout case */ if (test_and_clear_bit(BNX2FC_FLAG_ELS_TIMEOUT, &rec_req->req_flags)) { BNX2FC_IO_DBG(rec_req, "timed out, abort " @@ -433,6 +421,20 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg) } goto rec_compl_done; } + + if (test_bit(BNX2FC_FLAG_IO_COMPL, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "completed" + "orig_io - 0x%x\n", + orig_io_req->xid); + goto rec_compl_done; + } + if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &orig_io_req->req_flags)) { + BNX2FC_IO_DBG(rec_req, "abts in prog " + "orig_io - 0x%x\n", + orig_io_req->xid); + goto rec_compl_done; + } + mp_req = &(rec_req->mp_req); fc_hdr = &(mp_req->resp_fc_hdr); resp_len = mp_req->resp_len; diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 820a1840c3f7..85bcc4b55965 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu); #define DRV_MODULE_NAME "bnx2fc" #define DRV_MODULE_VERSION BNX2FC_VERSION -#define DRV_MODULE_RELDATE "Jun 23, 2011" +#define DRV_MODULE_RELDATE "Oct 02, 2011" static char version[] __devinitdata = @@ -56,6 +56,7 @@ static struct scsi_host_template bnx2fc_shost_template; static struct fc_function_template bnx2fc_transport_function; static struct fc_function_template bnx2fc_vport_xport_function; static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); +static void __bnx2fc_destroy(struct bnx2fc_interface *interface); static int bnx2fc_destroy(struct net_device *net_device); static int bnx2fc_enable(struct net_device *netdev); static int bnx2fc_disable(struct net_device *netdev); @@ -64,7 +65,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb); static void bnx2fc_start_disc(struct bnx2fc_interface *interface); static int bnx2fc_shost_config(struct fc_lport *lport, struct device *dev); -static int 
bnx2fc_net_config(struct fc_lport *lp); static int bnx2fc_lport_config(struct fc_lport *lport); static int bnx2fc_em_config(struct fc_lport *lport); static int bnx2fc_bind_adapter_devices(struct bnx2fc_hba *hba); @@ -78,6 +78,7 @@ static void bnx2fc_destroy_work(struct work_struct *work); static struct bnx2fc_hba *bnx2fc_hba_lookup(struct net_device *phys_dev); static struct bnx2fc_interface *bnx2fc_interface_lookup(struct net_device *phys_dev); +static inline void bnx2fc_interface_put(struct bnx2fc_interface *interface); static struct bnx2fc_hba *bnx2fc_find_hba_for_cnic(struct cnic_dev *cnic); static int bnx2fc_fw_init(struct bnx2fc_hba *hba); @@ -98,6 +99,25 @@ static struct notifier_block bnx2fc_cpu_notifier = { .notifier_call = bnx2fc_cpu_callback, }; +static inline struct net_device *bnx2fc_netdev(const struct fc_lport *lport) +{ + return ((struct bnx2fc_interface *) + ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; +} + +/** + * bnx2fc_get_lesb() - Fill the FCoE Link Error Status Block + * @lport: the local port + * @fc_lesb: the link error status block + */ +static void bnx2fc_get_lesb(struct fc_lport *lport, + struct fc_els_lesb *fc_lesb) +{ + struct net_device *netdev = bnx2fc_netdev(lport); + + __fcoe_get_lesb(lport, fc_lesb, netdev); +} + static void bnx2fc_clean_rx_queue(struct fc_lport *lp) { struct fcoe_percpu_s *bg; @@ -545,6 +565,14 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) break; } } + + if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { + /* Drop incoming ABTS */ + put_cpu(); + kfree_skb(skb); + return; + } + if (le32_to_cpu(fr_crc(fp)) != ~crc32(~0, skb->data, fr_len)) { if (stats->InvalidCRCCount < 5) @@ -727,7 +755,7 @@ void bnx2fc_get_link_state(struct bnx2fc_hba *hba) clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state); } -static int bnx2fc_net_config(struct fc_lport *lport) +static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev) { struct bnx2fc_hba *hba; struct bnx2fc_interface *interface; @@ -753,11 +781,16 @@ static int bnx2fc_net_config(struct fc_lport *lport) bnx2fc_link_speed_update(lport); if (!lport->vport) { - wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 1, 0); + if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) + wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, + 1, 0); BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); fc_set_wwnn(lport, wwnn); - wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 2, 0); + if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) + wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, + 2, 0); + BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); fc_set_wwpn(lport, wwpn); } @@ -769,8 +802,8 @@ static void bnx2fc_destroy_timer(unsigned long data) { struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; - BNX2FC_MISC_DBG("ERROR:bnx2fc_destroy_timer - " - "Destroy compl not received!!\n"); + printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - " + "Destroy compl not received!!\n"); set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); wake_up_interruptible(&hba->destroy_wait); } @@ -783,7 +816,7 @@ static void bnx2fc_destroy_timer(unsigned long data) * @vlan_id: vlan id - associated vlan id with this event * * Handles NETDEV_UP, NETDEV_DOWN, NETDEV_GOING_DOWN,NETDEV_CHANGE and - * NETDEV_CHANGE_MTU events + * NETDEV_CHANGE_MTU events. Handle NETDEV_UNREGISTER only for vlans. 
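bnx2fc_net_config() now takes the netdev explicitly so it can first ask the underlying driver for a real WWN through fcoe_get_wwn(), falling back to fcoe_wwn_from_mac() only when the driver has none to offer. fcoe_get_wwn() (its implementation moves into fcoe_transport.c later in this diff) simply dispatches to the netdev's ndo_fcoe_get_wwn hook. A hypothetical LLD-side implementation of that hook might look like the following; example_adapter and its nvram fields are inventions for illustration:

    struct example_adapter {
            u64 nvram_wwnn;  /* world wide node name programmed in adapter NVRAM */
            u64 nvram_wwpn;  /* world wide port name programmed in adapter NVRAM */
    };

    static int example_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
    {
            struct example_adapter *ad = netdev_priv(netdev);

            switch (type) {
            case NETDEV_FCOE_WWNN:
                    *wwn = ad->nvram_wwnn;
                    return 0;
            case NETDEV_FCOE_WWPN:
                    *wwn = ad->nvram_wwpn;
                    return 0;
            }
            return -EINVAL; /* unknown type: caller falls back to a MAC-derived WWN */
    }
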
*/ static void bnx2fc_indicate_netevent(void *context, unsigned long event, u16 vlan_id) @@ -791,12 +824,11 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context; struct fc_lport *lport; struct fc_lport *vport; - struct bnx2fc_interface *interface; + struct bnx2fc_interface *interface, *tmp; int wait_for_upload = 0; u32 link_possible = 1; - /* Ignore vlans for now */ - if (vlan_id != 0) + if (vlan_id != 0 && event != NETDEV_UNREGISTER) return; switch (event) { @@ -820,6 +852,18 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, case NETDEV_CHANGE: break; + case NETDEV_UNREGISTER: + if (!vlan_id) + return; + mutex_lock(&bnx2fc_dev_lock); + list_for_each_entry_safe(interface, tmp, &if_list, list) { + if (interface->hba == hba && + interface->vlan_id == (vlan_id & VLAN_VID_MASK)) + __bnx2fc_destroy(interface); + } + mutex_unlock(&bnx2fc_dev_lock); + return; + default: printk(KERN_ERR PFX "Unkonwn netevent %ld", event); return; @@ -838,8 +882,15 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event, bnx2fc_link_speed_update(lport); if (link_possible && !bnx2fc_link_ok(lport)) { - printk(KERN_ERR "indicate_netevent: ctlr_link_up\n"); - fcoe_ctlr_link_up(&interface->ctlr); + /* Reset max recv frame size to default */ + fc_set_mfs(lport, BNX2FC_MFS); + /* + * ctlr link up will only be handled during + * enable to avoid sending discovery solicitation + * on a stale vlan + */ + if (interface->enabled) + fcoe_ctlr_link_up(&interface->ctlr); } else if (fcoe_ctlr_link_down(&interface->ctlr)) { mutex_lock(&lport->lp_mutex); list_for_each_entry(vport, &lport->vports, list) @@ -995,6 +1046,17 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) struct bnx2fc_interface *interface = port->priv; struct net_device *netdev = interface->netdev; struct fc_lport *vn_port; + int rc; + char buf[32]; + + rc = fcoe_validate_vport_create(vport); + if (rc) { + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + printk(KERN_ERR PFX "Failed to create vport, " + "WWPN (0x%s) already exists\n", + buf); + return rc; + } if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) { printk(KERN_ERR PFX "vn ports cannot be created on" @@ -1024,16 +1086,46 @@ static int bnx2fc_vport_create(struct fc_vport *vport, bool disabled) return 0; } +static void bnx2fc_free_vport(struct bnx2fc_hba *hba, struct fc_lport *lport) +{ + struct bnx2fc_lport *blport, *tmp; + + spin_lock_bh(&hba->hba_lock); + list_for_each_entry_safe(blport, tmp, &hba->vports, list) { + if (blport->lport == lport) { + list_del(&blport->list); + kfree(blport); + } + } + spin_unlock_bh(&hba->hba_lock); +} + static int bnx2fc_vport_destroy(struct fc_vport *vport) { struct Scsi_Host *shost = vport_to_shost(vport); struct fc_lport *n_port = shost_priv(shost); struct fc_lport *vn_port = vport->dd_data; struct fcoe_port *port = lport_priv(vn_port); + struct bnx2fc_interface *interface = port->priv; + struct fc_lport *v_port; + bool found = false; mutex_lock(&n_port->lp_mutex); + list_for_each_entry(v_port, &n_port->vports, list) + if (v_port->vport == vport) { + found = true; + break; + } + + if (!found) { + mutex_unlock(&n_port->lp_mutex); + return -ENOENT; + } list_del(&vn_port->list); mutex_unlock(&n_port->lp_mutex); + bnx2fc_free_vport(interface->hba, port->lport); + bnx2fc_port_shutdown(port->lport); + bnx2fc_interface_put(interface); queue_work(bnx2fc_wq, &port->destroy_work); return 0; } @@ -1054,7 +1146,7 @@ 
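The new NETDEV_UNREGISTER branch is also why the function grew a second iterator variable: __bnx2fc_destroy() removes the interface from if_list, so the walk must use the _safe variant, which caches the next node before the current one can be unlinked. Reduced to the idiom:

    struct bnx2fc_interface *iface, *tmp;

    mutex_lock(&bnx2fc_dev_lock);
    list_for_each_entry_safe(iface, tmp, &if_list, list) {
            if (iface->hba == hba &&
                iface->vlan_id == (vlan_id & VLAN_VID_MASK))
                    __bnx2fc_destroy(iface); /* unlinks iface; tmp keeps the walk valid */
    }
    mutex_unlock(&bnx2fc_dev_lock);
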
static int bnx2fc_vport_disable(struct fc_vport *vport, bool disable) } -static int bnx2fc_netdev_setup(struct bnx2fc_interface *interface) +static int bnx2fc_interface_setup(struct bnx2fc_interface *interface) { struct net_device *netdev = interface->netdev; struct net_device *physdev = interface->hba->phys_dev; @@ -1252,7 +1344,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba, interface->ctlr.get_src_addr = bnx2fc_get_src_mac; set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); - rc = bnx2fc_netdev_setup(interface); + rc = bnx2fc_interface_setup(interface); if (!rc) return interface; @@ -1318,7 +1410,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, fc_set_wwpn(lport, vport->port_name); } /* Configure netdev and networking properties of the lport */ - rc = bnx2fc_net_config(lport); + rc = bnx2fc_net_config(lport, interface->netdev); if (rc) { printk(KERN_ERR PFX "Error on bnx2fc_net_config\n"); goto lp_config_err; @@ -1372,7 +1464,7 @@ free_blport: return NULL; } -static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface) +static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface) { /* Dont listen for Ethernet packets anymore */ __dev_remove_pack(&interface->fcoe_packet_type); @@ -1380,10 +1472,11 @@ static void bnx2fc_netdev_cleanup(struct bnx2fc_interface *interface) synchronize_net(); } -static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba) +static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) { + struct fc_lport *lport = interface->ctlr.lp; struct fcoe_port *port = lport_priv(lport); - struct bnx2fc_lport *blport, *tmp; + struct bnx2fc_hba *hba = interface->hba; /* Stop the transmit retry timer */ del_timer_sync(&port->timer); @@ -1391,6 +1484,14 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba) /* Free existing transmit skbs */ fcoe_clean_pending_queue(lport); + bnx2fc_net_cleanup(interface); + + bnx2fc_free_vport(hba, lport); +} + +static void bnx2fc_if_destroy(struct fc_lport *lport) +{ + /* Free queued packets for the receive thread */ bnx2fc_clean_rx_queue(lport); @@ -1407,19 +1508,22 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba) /* Free memory used by statistical counters */ fc_lport_free_stats(lport); - spin_lock_bh(&hba->hba_lock); - list_for_each_entry_safe(blport, tmp, &hba->vports, list) { - if (blport->lport == lport) { - list_del(&blport->list); - kfree(blport); - } - } - spin_unlock_bh(&hba->hba_lock); - /* Release Scsi_Host */ scsi_host_put(lport->host); } +static void __bnx2fc_destroy(struct bnx2fc_interface *interface) +{ + struct fc_lport *lport = interface->ctlr.lp; + struct fcoe_port *port = lport_priv(lport); + + bnx2fc_interface_cleanup(interface); + bnx2fc_stop(interface); + list_del(&interface->list); + bnx2fc_interface_put(interface); + queue_work(bnx2fc_wq, &port->destroy_work); +} + /** * bnx2fc_destroy - Destroy a bnx2fc FCoE interface * @@ -1433,8 +1537,6 @@ static void bnx2fc_if_destroy(struct fc_lport *lport, struct bnx2fc_hba *hba) static int bnx2fc_destroy(struct net_device *netdev) { struct bnx2fc_interface *interface = NULL; - struct bnx2fc_hba *hba; - struct fc_lport *lport; int rc = 0; rtnl_lock(); @@ -1447,15 +1549,9 @@ static int bnx2fc_destroy(struct net_device *netdev) goto netdev_err; } - hba = interface->hba; - bnx2fc_netdev_cleanup(interface); - lport = interface->ctlr.lp; - bnx2fc_stop(interface); - list_del(&interface->list); 
destroy_workqueue(interface->timer_work_queue); - bnx2fc_interface_put(interface); - bnx2fc_if_destroy(lport, hba); + __bnx2fc_destroy(interface); netdev_err: mutex_unlock(&bnx2fc_dev_lock); @@ -1467,22 +1563,13 @@ static void bnx2fc_destroy_work(struct work_struct *work) { struct fcoe_port *port; struct fc_lport *lport; - struct bnx2fc_interface *interface; - struct bnx2fc_hba *hba; port = container_of(work, struct fcoe_port, destroy_work); lport = port->lport; - interface = port->priv; - hba = interface->hba; BNX2FC_HBA_DBG(lport, "Entered bnx2fc_destroy_work\n"); - bnx2fc_port_shutdown(lport); - rtnl_lock(); - mutex_lock(&bnx2fc_dev_lock); - bnx2fc_if_destroy(lport, hba); - mutex_unlock(&bnx2fc_dev_lock); - rtnl_unlock(); + bnx2fc_if_destroy(lport); } static void bnx2fc_unbind_adapter_devices(struct bnx2fc_hba *hba) @@ -1661,6 +1748,7 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) wait_event_interruptible(hba->destroy_wait, test_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags)); + clear_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags); /* This should never happen */ if (signal_pending(current)) flush_signals(current); @@ -1723,7 +1811,7 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface) lport = interface->ctlr.lp; BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); - if (!bnx2fc_link_ok(lport)) { + if (!bnx2fc_link_ok(lport) && interface->enabled) { BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); fcoe_ctlr_link_up(&interface->ctlr); fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; @@ -1737,6 +1825,11 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface) if (++wait_cnt > 12) break; } + + /* Reset max receive frame size to default */ + if (fc_set_mfs(lport, BNX2FC_MFS)) + return; + fc_lport_init(lport); fc_fabric_login(lport); } @@ -1800,6 +1893,7 @@ static int bnx2fc_disable(struct net_device *netdev) rc = -ENODEV; printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); } else { + interface->enabled = false; fcoe_ctlr_link_down(&interface->ctlr); fcoe_clean_pending_queue(interface->ctlr.lp); } @@ -1822,8 +1916,10 @@ static int bnx2fc_enable(struct net_device *netdev) if (!interface || !interface->ctlr.lp) { rc = -ENODEV; printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); - } else if (!bnx2fc_link_ok(interface->ctlr.lp)) + } else if (!bnx2fc_link_ok(interface->ctlr.lp)) { fcoe_ctlr_link_up(&interface->ctlr); + interface->enabled = true; + } mutex_unlock(&bnx2fc_dev_lock); rtnl_unlock(); @@ -1923,7 +2019,6 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) if (!lport) { printk(KERN_ERR PFX "Failed to create interface (%s)\n", netdev->name); - bnx2fc_netdev_cleanup(interface); rc = -EINVAL; goto if_create_err; } @@ -1936,8 +2031,15 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) /* Make this master N_port */ interface->ctlr.lp = lport; + if (!bnx2fc_link_ok(lport)) { + fcoe_ctlr_link_up(&interface->ctlr); + fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; + set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); + } + BNX2FC_HBA_DBG(lport, "create: START DISC\n"); bnx2fc_start_disc(interface); + interface->enabled = true; /* * Release from kref_init in bnx2fc_interface_setup, on success * lport should be holding a reference taken in bnx2fc_if_create @@ -1951,6 +2053,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) if_create_err: destroy_workqueue(interface->timer_work_queue); ifput_err: + bnx2fc_net_cleanup(interface); 
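Several hunks in this file cooperate around the new interface->enabled flag: bnx2fc_disable() clears it before forcing the FIP controller link down, bnx2fc_enable() sets it once the link is confirmed up, and (as in the netevent handler above) link-up events are forwarded to fcoe_ctlr_link_up() only while the flag is set, so no discovery solicitations go out on a stale VLAN. Condensed from the hunks:

    /* bnx2fc_disable(): stop FIP and discovery traffic immediately */
    interface->enabled = false;
    fcoe_ctlr_link_down(&interface->ctlr);
    fcoe_clean_pending_queue(interface->ctlr.lp);

    /* bnx2fc_enable(): announce link-up only when willing to talk */
    if (!bnx2fc_link_ok(interface->ctlr.lp)) {  /* physical link is up */
            fcoe_ctlr_link_up(&interface->ctlr);
            interface->enabled = true;
    }
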
bnx2fc_interface_put(interface); netdev_err: module_put(THIS_MODULE); @@ -2017,7 +2120,6 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) { struct bnx2fc_hba *hba; struct bnx2fc_interface *interface, *tmp; - struct fc_lport *lport; BNX2FC_MISC_DBG("Entered bnx2fc_ulp_exit\n"); @@ -2039,18 +2141,10 @@ static void bnx2fc_ulp_exit(struct cnic_dev *dev) list_del_init(&hba->list); adapter_count--; - list_for_each_entry_safe(interface, tmp, &if_list, list) { + list_for_each_entry_safe(interface, tmp, &if_list, list) /* destroy not called yet, move to quiesced list */ - if (interface->hba == hba) { - bnx2fc_netdev_cleanup(interface); - bnx2fc_stop(interface); - - list_del(&interface->list); - lport = interface->ctlr.lp; - bnx2fc_interface_put(interface); - bnx2fc_if_destroy(lport, hba); - } - } + if (interface->hba == hba) + __bnx2fc_destroy(interface); mutex_unlock(&bnx2fc_dev_lock); bnx2fc_ulp_stop(hba); @@ -2119,7 +2213,7 @@ static void bnx2fc_percpu_thread_create(unsigned int cpu) (void *)p, "bnx2fc_thread/%d", cpu); /* bind thread to the cpu */ - if (likely(!IS_ERR(p->iothread))) { + if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu); p->iothread = thread; wake_up_process(thread); @@ -2131,7 +2225,6 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) struct bnx2fc_percpu_s *p; struct task_struct *thread; struct bnx2fc_work *work, *tmp; - LIST_HEAD(work_list); BNX2FC_MISC_DBG("destroying io thread for CPU %d\n", cpu); @@ -2143,7 +2236,7 @@ static void bnx2fc_percpu_thread_destroy(unsigned int cpu) /* Free all work in the list */ - list_for_each_entry_safe(work, tmp, &work_list, list) { + list_for_each_entry_safe(work, tmp, &p->work_list, list) { list_del_init(&work->list); bnx2fc_process_cq_compl(work->tgt, work->wqe); kfree(work); @@ -2376,6 +2469,7 @@ static struct fc_function_template bnx2fc_transport_function = { .vport_create = bnx2fc_vport_create, .vport_delete = bnx2fc_vport_destroy, .vport_disable = bnx2fc_vport_disable, + .bsg_request = fc_lport_bsg_request, }; static struct fc_function_template bnx2fc_vport_xport_function = { @@ -2409,6 +2503,7 @@ static struct fc_function_template bnx2fc_vport_xport_function = { .get_fc_host_stats = fc_get_host_stats, .issue_fc_host_lip = bnx2fc_fcoe_reset, .terminate_rport_io = fc_rport_terminate_io, + .bsg_request = fc_lport_bsg_request, }; /** @@ -2438,6 +2533,7 @@ static struct libfc_function_template bnx2fc_libfc_fcn_templ = { .elsct_send = bnx2fc_elsct_send, .fcp_abort_io = bnx2fc_abort_io, .fcp_cleanup = bnx2fc_cleanup, + .get_lesb = bnx2fc_get_lesb, .rport_event_callback = bnx2fc_rport_event_handler, }; diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c index 72cfb14acd3a..1923a25cb6a2 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c +++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c @@ -1009,6 +1009,7 @@ int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) u32 cq_cons; struct fcoe_cqe *cqe; u32 num_free_sqes = 0; + u32 num_cqes = 0; u16 wqe; /* @@ -1058,10 +1059,11 @@ unlock: wake_up_process(fps->iothread); else bnx2fc_process_cq_compl(tgt, wqe); + num_free_sqes++; } cqe++; tgt->cq_cons_idx++; - num_free_sqes++; + num_cqes++; if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) { tgt->cq_cons_idx = 0; @@ -1070,8 +1072,10 @@ unlock: 1 - tgt->cq_curr_toggle_bit; } } - if (num_free_sqes) { - bnx2fc_arm_cq(tgt); + if (num_cqes) { + /* Arm CQ only if doorbell is mapped */ + if (tgt->ctx_base) + bnx2fc_arm_cq(tgt); atomic_add(num_free_sqes, &tgt->free_sqes); } spin_unlock_bh(&tgt->cq_lock); @@ -1739,11 +1743,13 @@ void 
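The CPU-hotplug hunk above is a real bug fix despite its size: the old code tested IS_ERR(p->iothread), but p->iothread is still NULL at that point, so the check always passed and an ERR_PTR returned by kthread_create() would have been bound and woken as if it were a task. The corrected sequence; the worker function's name is clipped out of the hunk, so a stand-in is used here:

    struct task_struct *thread;

    /* 'bnx2fc_io_worker' stands in for the driver's thread function,
     * whose name is not visible in the hunk above */
    thread = kthread_create(bnx2fc_io_worker, (void *)p,
                            "bnx2fc_thread/%d", cpu);
    if (likely(!IS_ERR(thread))) {      /* test what kthread_create() returned */
            kthread_bind(thread, cpu);  /* pin the worker to the hotplugged CPU */
            p->iothread = thread;
            wake_up_process(thread);
    }
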
bnx2fc_init_task(struct bnx2fc_cmd *io_req, /* Init state to NORMAL */ task->txwr_rxrd.const_ctx.init_flags |= task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; - if (dev_type == TYPE_TAPE) + if (dev_type == TYPE_TAPE) { task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_TAPE << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; - else + io_req->rec_retry = 0; + io_req->rec_retry = 0; + } else task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_DISK << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 6cc3789075bc..0c64d184d731 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -17,7 +17,7 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, int bd_index); static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req); -static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); +static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req); static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req); static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req); static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, @@ -1251,7 +1251,6 @@ void bnx2fc_process_seq_cleanup_compl(struct bnx2fc_cmd *seq_clnp_req, seq_clnp_req->xid); goto free_cb_arg; } - kref_get(&orig_io_req->refcount); spin_unlock_bh(&tgt->tgt_lock); rc = bnx2fc_send_srr(orig_io_req, offset, r_ctl); @@ -1569,6 +1568,8 @@ static int bnx2fc_split_bd(struct bnx2fc_cmd *io_req, u64 addr, int sg_len, static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) { + struct bnx2fc_interface *interface = io_req->port->priv; + struct bnx2fc_hba *hba = interface->hba; struct scsi_cmnd *sc = io_req->sc_cmd; struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; struct scatterlist *sg; @@ -1580,7 +1581,8 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) u64 addr; int i; - sg_count = scsi_dma_map(sc); + sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc), + scsi_sg_count(sc), sc->sc_data_direction); scsi_for_each_sg(sc, sg, sg_count, i) { sg_len = sg_dma_len(sg); addr = sg_dma_address(sg); @@ -1605,20 +1607,24 @@ static int bnx2fc_map_sg(struct bnx2fc_cmd *io_req) return bd_count; } -static void bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) +static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req) { struct scsi_cmnd *sc = io_req->sc_cmd; struct fcoe_bd_ctx *bd = io_req->bd_tbl->bd_tbl; int bd_count; - if (scsi_sg_count(sc)) + if (scsi_sg_count(sc)) { bd_count = bnx2fc_map_sg(io_req); - else { + if (bd_count == 0) + return -ENOMEM; + } else { bd_count = 0; bd[0].buf_addr_lo = bd[0].buf_addr_hi = 0; bd[0].buf_len = bd[0].flags = 0; } io_req->bd_tbl->bd_valid = bd_count; + + return 0; } static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req) @@ -1790,12 +1796,6 @@ int bnx2fc_queuecommand(struct Scsi_Host *host, tgt = (struct bnx2fc_rport *)&rp[1]; if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { - if (test_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags)) { - sc_cmd->result = DID_NO_CONNECT << 16; - sc_cmd->scsi_done(sc_cmd); - return 0; - - } /* * Session is not offloaded yet. Let SCSI-ml retry * the command. 
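Two coordinated changes land in bnx2fc_io.c: bnx2fc_map_sg() now calls dma_map_sg() against hba->pcidev rather than going through scsi_dma_map() (presumably because the Scsi_Host's parent, for instance an NPIV vport host, need not be the device that masters the DMA), and bnx2fc_build_bd_list_from_sg() grows an int return so a mapping failure is no longer silently ignored. Condensed, with the empty-scatterlist branch elided:

    static int bnx2fc_build_bd_list_from_sg(struct bnx2fc_cmd *io_req)
    {
            int bd_count;

            bd_count = bnx2fc_map_sg(io_req);  /* dma_map_sg() under the hood */
            if (bd_count == 0)
                    return -ENOMEM;            /* nothing mapped: fail the I/O */
            io_req->bd_tbl->bd_valid = bd_count;
            return 0;
    }
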
@@ -1946,7 +1946,13 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt, xid = io_req->xid; /* Build buffer descriptor list for firmware from sg list */ - bnx2fc_build_bd_list_from_sg(io_req); + if (bnx2fc_build_bd_list_from_sg(io_req)) { + printk(KERN_ERR PFX "BD list creation failed\n"); + spin_lock_bh(&tgt->tgt_lock); + kref_put(&io_req->refcount, bnx2fc_cmd_release); + spin_unlock_bh(&tgt->tgt_lock); + return -EAGAIN; + } task_idx = xid / BNX2FC_TASKS_PER_PAGE; index = xid % BNX2FC_TASKS_PER_PAGE; diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c index d5311b577cca..c1800b531270 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c +++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c @@ -76,7 +76,7 @@ static void bnx2fc_offload_session(struct fcoe_port *port, if (rval) { printk(KERN_ERR PFX "Failed to allocate conn id for " "port_id (%6x)\n", rport->port_id); - goto ofld_err; + goto tgt_init_err; } /* Allocate session resources */ @@ -134,18 +134,17 @@ retry_ofld: /* upload will take care of cleaning up sess resc */ lport->tt.rport_logoff(rdata); } - /* Arm CQ */ - bnx2fc_arm_cq(tgt); return; ofld_err: /* couldn't offload the session. log off from this rport */ BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n"); - lport->tt.rport_logoff(rdata); /* Free session resources */ bnx2fc_free_session_resc(hba, tgt); +tgt_init_err: if (tgt->fcoe_conn_id != -1) bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); + lport->tt.rport_logoff(rdata); } void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt) @@ -624,7 +623,6 @@ static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id) /* called with hba mutex held */ spin_lock_bh(&hba->hba_lock); hba->tgt_ofld_list[conn_id] = NULL; - hba->next_conn_id = conn_id; spin_unlock_bh(&hba->hba_lock); } @@ -791,8 +789,6 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba, return 0; mem_alloc_failure: - bnx2fc_free_session_resc(hba, tgt); - bnx2fc_free_conn_id(hba, tgt->fcoe_conn_id); return -ENOMEM; } @@ -807,14 +803,14 @@ mem_alloc_failure: static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, struct bnx2fc_rport *tgt) { - BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); + void __iomem *ctx_base_ptr; - if (tgt->ctx_base) { - iounmap(tgt->ctx_base); - tgt->ctx_base = NULL; - } + BNX2FC_TGT_DBG(tgt, "Freeing up session resources\n"); spin_lock_bh(&tgt->cq_lock); + ctx_base_ptr = tgt->ctx_base; + tgt->ctx_base = NULL; + /* Free LCQ */ if (tgt->lcq) { dma_free_coherent(&hba->pcidev->dev, tgt->lcq_mem_size, @@ -868,4 +864,7 @@ static void bnx2fc_free_session_resc(struct bnx2fc_hba *hba, tgt->sq = NULL; } spin_unlock_bh(&tgt->cq_lock); + + if (ctx_base_ptr) + iounmap(ctx_base_ptr); } diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index cffd4d75df56..d1e697190970 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -2177,6 +2177,59 @@ static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params) return 0; } +static mode_t bnx2i_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case 
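The bnx2fc_free_session_resc() rework is a locking fix rather than a cosmetic one: iounmap() is not safe to call in atomic context, so it cannot sit inside the spin_lock_bh()-protected region that tears down the queues. The doorbell mapping is detached while the lock is held (which is also what lets the CQ-arming hunk earlier in this diff test tgt->ctx_base before ringing the doorbell) and unmapped only after the lock drops:

    void __iomem *ctx_base_ptr;

    spin_lock_bh(&tgt->cq_lock);
    ctx_base_ptr = tgt->ctx_base;  /* detach the doorbell mapping... */
    tgt->ctx_base = NULL;          /* ...so bnx2fc_arm_cq() sees it gone */
    /* ...free LCQ, RQ, CQ and SQ while still under cq_lock... */
    spin_unlock_bh(&tgt->cq_lock);

    if (ctx_base_ptr)
            iounmap(ctx_base_ptr); /* not atomic-safe, hence after the unlock */
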
ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} /* * 'Scsi_Host_Template' structure and 'iscsi_tranport' structure template @@ -2207,37 +2260,12 @@ struct iscsi_transport bnx2i_iscsi_transport = { CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD | CAP_TEXT_NEGO, - .param_mask = ISCSI_MAX_RECV_DLENGTH | - ISCSI_MAX_XMIT_DLENGTH | - ISCSI_HDRDGST_EN | - ISCSI_DATADGST_EN | - ISCSI_INITIAL_R2T_EN | - ISCSI_MAX_R2T | - ISCSI_IMM_DATA_EN | - ISCSI_FIRST_BURST | - ISCSI_MAX_BURST | - ISCSI_PDU_INORDER_EN | - ISCSI_DATASEQ_INORDER_EN | - ISCSI_ERL | - ISCSI_CONN_PORT | - ISCSI_CONN_ADDRESS | - ISCSI_EXP_STATSN | - ISCSI_PERSISTENT_PORT | - ISCSI_PERSISTENT_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT | - ISCSI_USERNAME | ISCSI_PASSWORD | - ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | - ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO | - ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, - .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | - ISCSI_HOST_NETDEV_NAME, .create_session = bnx2i_session_create, .destroy_session = bnx2i_session_destroy, .create_conn = bnx2i_conn_create, .bind_conn = bnx2i_conn_bind, .destroy_conn = bnx2i_conn_destroy, + .attr_is_visible = bnx2i_attr_is_visible, .set_param = iscsi_set_param, .get_conn_param = iscsi_conn_get_param, .get_session_param = iscsi_session_get_param, diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 1242c7c04a01..000294a9df80 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -105,25 +105,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = { .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, - .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | - ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | - ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | - ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | - ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | - ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | - ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | - ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | - ISCSI_PERSISTENT_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT | - ISCSI_USERNAME | ISCSI_PASSWORD | - ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | - ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO | - ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, - .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | - ISCSI_HOST_INITIATOR_NAME | - ISCSI_HOST_NETDEV_NAME, + .attr_is_visible = cxgbi_attr_is_visible, .get_host_param = cxgbi_get_host_param, .set_host_param = cxgbi_set_host_param, /* session management */ diff --git 
a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 31c79bde6976..ac7a9b1e3e23 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -106,25 +106,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = { .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST | CAP_DIGEST_OFFLOAD | CAP_PADDING_OFFLOAD | CAP_TEXT_NEGO, - .param_mask = ISCSI_MAX_RECV_DLENGTH | ISCSI_MAX_XMIT_DLENGTH | - ISCSI_HDRDGST_EN | ISCSI_DATADGST_EN | - ISCSI_INITIAL_R2T_EN | ISCSI_MAX_R2T | - ISCSI_IMM_DATA_EN | ISCSI_FIRST_BURST | - ISCSI_MAX_BURST | ISCSI_PDU_INORDER_EN | - ISCSI_DATASEQ_INORDER_EN | ISCSI_ERL | - ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | - ISCSI_EXP_STATSN | ISCSI_PERSISTENT_PORT | - ISCSI_PERSISTENT_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT | - ISCSI_USERNAME | ISCSI_PASSWORD | - ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | - ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO | - ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, - .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | - ISCSI_HOST_INITIATOR_NAME | - ISCSI_HOST_NETDEV_NAME, + .attr_is_visible = cxgbi_attr_is_visible, .get_host_param = cxgbi_get_host_param, .set_host_param = cxgbi_set_host_param, /* session management */ diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 1c1329bc77c7..c363a4b260fd 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -2568,6 +2568,62 @@ void cxgbi_iscsi_cleanup(struct iscsi_transport *itp, } EXPORT_SYMBOL_GPL(cxgbi_iscsi_cleanup); +mode_t cxgbi_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} +EXPORT_SYMBOL_GPL(cxgbi_attr_is_visible); + static int __init libcxgbi_init_module(void) { sw_tag_idx_bits = (__ilog2_u32(ISCSI_ITT_MASK)) + 1; diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h index 3a25b1187c10..20c88279c7a6 100644 --- a/drivers/scsi/cxgbi/libcxgbi.h +++ b/drivers/scsi/cxgbi/libcxgbi.h @@ -709,6 +709,7 @@ int cxgbi_conn_xmit_pdu(struct iscsi_task *); void cxgbi_cleanup_task(struct iscsi_task *task); +mode_t cxgbi_attr_is_visible(int 
param_type, int param); void cxgbi_get_conn_stats(struct iscsi_cls_conn *, struct iscsi_stats *); int cxgbi_set_conn_param(struct iscsi_cls_conn *, enum iscsi_param, char *, int); diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c index 0119b8147797..7c05fd9dccfd 100644 --- a/drivers/scsi/device_handler/scsi_dh.c +++ b/drivers/scsi/device_handler/scsi_dh.c @@ -60,6 +60,46 @@ static struct scsi_device_handler *get_device_handler_by_idx(int idx) } /* + * device_handler_match_function - Match a device handler to a device + * @sdev - SCSI device to be tested + * + * Tests @sdev against the match function of all registered device_handler. + * Returns the found device handler or NULL if not found. + */ +static struct scsi_device_handler * +device_handler_match_function(struct scsi_device *sdev) +{ + struct scsi_device_handler *tmp_dh, *found_dh = NULL; + + spin_lock(&list_lock); + list_for_each_entry(tmp_dh, &scsi_dh_list, list) { + if (tmp_dh->match && tmp_dh->match(sdev)) { + found_dh = tmp_dh; + break; + } + } + spin_unlock(&list_lock); + return found_dh; +} + +/* + * device_handler_match_devlist - Match a device handler to a device + * @sdev - SCSI device to be tested + * + * Tests @sdev against all device_handler registered in the devlist. + * Returns the found device handler or NULL if not found. + */ +static struct scsi_device_handler * +device_handler_match_devlist(struct scsi_device *sdev) +{ + int idx; + + idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model, + SCSI_DEVINFO_DH); + return get_device_handler_by_idx(idx); +} + +/* * device_handler_match - Attach a device handler to a device * @scsi_dh - The device handler to match against or NULL * @sdev - SCSI device to be tested against @scsi_dh @@ -72,12 +112,11 @@ static struct scsi_device_handler * device_handler_match(struct scsi_device_handler *scsi_dh, struct scsi_device *sdev) { - struct scsi_device_handler *found_dh = NULL; - int idx; + struct scsi_device_handler *found_dh; - idx = scsi_get_device_flags_keyed(sdev, sdev->vendor, sdev->model, - SCSI_DEVINFO_DH); - found_dh = get_device_handler_by_idx(idx); + found_dh = device_handler_match_function(sdev); + if (!found_dh) + found_dh = device_handler_match_devlist(sdev); if (scsi_dh && found_dh != scsi_dh) found_dh = NULL; @@ -151,6 +190,10 @@ store_dh_state(struct device *dev, struct device_attribute *attr, struct scsi_device_handler *scsi_dh; int err = -EINVAL; + if (sdev->sdev_state == SDEV_CANCEL || + sdev->sdev_state == SDEV_DEL) + return -ENODEV; + if (!sdev->scsi_dh_data) { /* * Attach to a device handler @@ -327,7 +370,7 @@ int scsi_register_device_handler(struct scsi_device_handler *scsi_dh) list_add(&scsi_dh->list, &scsi_dh_list); spin_unlock(&list_lock); - for (i = 0; scsi_dh->devlist[i].vendor; i++) { + for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) { scsi_dev_info_list_add_keyed(0, scsi_dh->devlist[i].vendor, scsi_dh->devlist[i].model, @@ -360,7 +403,7 @@ int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh) bus_for_each_dev(&scsi_bus_type, NULL, scsi_dh, scsi_dh_notifier_remove); - for (i = 0; scsi_dh->devlist[i].vendor; i++) { + for (i = 0; scsi_dh->devlist && scsi_dh->devlist[i].vendor; i++) { scsi_dev_info_list_del_keyed(scsi_dh->devlist[i].vendor, scsi_dh->devlist[i].model, SCSI_DEVINFO_DH); @@ -468,7 +511,7 @@ int scsi_dh_handler_exist(const char *name) EXPORT_SYMBOL_GPL(scsi_dh_handler_exist); /* - * scsi_dh_handler_attach - Attach device handler + * scsi_dh_attach - Attach 
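device_handler_match_function() gives a handler a second way to claim devices: a programmatic .match() callback, consulted before the keyed vendor/model devlist. That is also why the register/unregister loops above must now tolerate a NULL devlist. A handler built on the new hook looks essentially like the ALUA conversion further down this diff:

    static bool example_match(struct scsi_device *sdev)
    {
            /* claim every device whose INQUIRY data advertises TPGS support */
            return scsi_device_tpgs(sdev) != 0;
    }

    static struct scsi_device_handler example_dh = {
            .name  = "example",
            .match = example_match,
            /* .devlist intentionally left NULL */
            /* .attach, .detach, ... as before */
    };
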
device handler * @sdev - sdev the handler should be attached to * @name - name of the handler to attach */ @@ -498,7 +541,7 @@ int scsi_dh_attach(struct request_queue *q, const char *name) EXPORT_SYMBOL_GPL(scsi_dh_attach); /* - * scsi_dh_handler_detach - Detach device handler + * scsi_dh_detach - Detach device handler * @sdev - sdev the handler should be detached from * * This function will detach the device handler only diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c index 6fec9fe5dc39..627f4b5e5176 100644 --- a/drivers/scsi/device_handler/scsi_dh_alua.c +++ b/drivers/scsi/device_handler/scsi_dh_alua.c @@ -128,43 +128,6 @@ static struct request *get_alua_req(struct scsi_device *sdev, } /* - * submit_std_inquiry - Issue a standard INQUIRY command - * @sdev: sdev the command should be send to - */ -static int submit_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) -{ - struct request *rq; - int err = SCSI_DH_RES_TEMP_UNAVAIL; - - rq = get_alua_req(sdev, h->inq, ALUA_INQUIRY_SIZE, READ); - if (!rq) - goto done; - - /* Prepare the command. */ - rq->cmd[0] = INQUIRY; - rq->cmd[1] = 0; - rq->cmd[2] = 0; - rq->cmd[4] = ALUA_INQUIRY_SIZE; - rq->cmd_len = COMMAND_SIZE(INQUIRY); - - rq->sense = h->sense; - memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); - rq->sense_len = h->senselen = 0; - - err = blk_execute_rq(rq->q, NULL, rq, 1); - if (err == -EIO) { - sdev_printk(KERN_INFO, sdev, - "%s: std inquiry failed with %x\n", - ALUA_DH_NAME, rq->errors); - h->senselen = rq->sense_len; - err = SCSI_DH_IO; - } - blk_put_request(rq); -done: - return err; -} - -/* * submit_vpd_inquiry - Issue an INQUIRY VPD page 0x83 command * @sdev: sdev the command should be sent to */ @@ -338,23 +301,17 @@ static unsigned submit_stpg(struct alua_dh_data *h) } /* - * alua_std_inquiry - Evaluate standard INQUIRY command + * alua_check_tpgs - Evaluate TPGS setting * @sdev: device to be checked * - * Just extract the TPGS setting to find out if ALUA + * Examine the TPGS setting of the sdev to find out if ALUA * is supported. */ -static int alua_std_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) +static int alua_check_tpgs(struct scsi_device *sdev, struct alua_dh_data *h) { - int err; - - err = submit_std_inquiry(sdev, h); - - if (err != SCSI_DH_OK) - return err; + int err = SCSI_DH_OK; - /* Check TPGS setting */ - h->tpgs = (h->inq[5] >> 4) & 0x3; + h->tpgs = scsi_device_tpgs(sdev); switch (h->tpgs) { case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, @@ -508,27 +465,28 @@ static int alua_check_sense(struct scsi_device *sdev, * Power On, Reset, or Bus Device Reset, just retry. */ return ADD_TO_MLQUEUE; - if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) { + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) /* * ALUA state changed */ return ADD_TO_MLQUEUE; - } - if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) { + if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) /* * Implicit ALUA state transition failed */ return ADD_TO_MLQUEUE; - } - if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) { + if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03) + /* + * Inquiry data has changed + */ + return ADD_TO_MLQUEUE; + if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) /* * REPORTED_LUNS_DATA_HAS_CHANGED is reported * when switching controllers on targets like * Intel Multi-Flex. We can just retry. 
*/ return ADD_TO_MLQUEUE; - } - break; } @@ -547,9 +505,9 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) { struct scsi_sense_hdr sense_hdr; int len, k, off, valid_states = 0; - char *ucp; + unsigned char *ucp; unsigned err; - unsigned long expiry, interval = 10; + unsigned long expiry, interval = 1; expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT); retry: @@ -610,7 +568,7 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) case TPGS_STATE_TRANSITIONING: if (time_before(jiffies, expiry)) { /* State transition, retry */ - interval *= 10; + interval *= 2; msleep(interval); goto retry; } @@ -642,7 +600,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) { int err; - err = alua_std_inquiry(sdev, h); + err = alua_check_tpgs(sdev, h); if (err != SCSI_DH_OK) goto out; @@ -674,11 +632,9 @@ static int alua_activate(struct scsi_device *sdev, struct alua_dh_data *h = get_alua_data(sdev); int err = SCSI_DH_OK; - if (h->group_id != -1) { - err = alua_rtpg(sdev, h); - if (err != SCSI_DH_OK) - goto out; - } + err = alua_rtpg(sdev, h); + if (err != SCSI_DH_OK) + goto out; if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED && @@ -720,23 +676,10 @@ static int alua_prep_fn(struct scsi_device *sdev, struct request *req) } -static const struct scsi_dh_devlist alua_dev_list[] = { - {"HP", "MSA VOLUME" }, - {"HP", "HSV101" }, - {"HP", "HSV111" }, - {"HP", "HSV200" }, - {"HP", "HSV210" }, - {"HP", "HSV300" }, - {"IBM", "2107900" }, - {"IBM", "2145" }, - {"Pillar", "Axiom" }, - {"Intel", "Multi-Flex"}, - {"NETAPP", "LUN"}, - {"NETAPP", "LUN C-Mode"}, - {"AIX", "NVDISK"}, - {"Promise", "VTrak"}, - {NULL, NULL} -}; +static bool alua_match(struct scsi_device *sdev) +{ + return (scsi_device_tpgs(sdev) != 0); +} static int alua_bus_attach(struct scsi_device *sdev); static void alua_bus_detach(struct scsi_device *sdev); @@ -744,12 +687,12 @@ static void alua_bus_detach(struct scsi_device *sdev); static struct scsi_device_handler alua_dh = { .name = ALUA_DH_NAME, .module = THIS_MODULE, - .devlist = alua_dev_list, .attach = alua_bus_attach, .detach = alua_bus_detach, .prep_fn = alua_prep_fn, .check_sense = alua_check_sense, .activate = alua_activate, + .match = alua_match, }; /* diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c index 27c9d65d54a9..82d612f0c49d 100644 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c @@ -1,5 +1,5 @@ /* - * Engenio/LSI RDAC SCSI Device Handler + * LSI/Engenio/NetApp E-Series RDAC SCSI Device Handler * * Copyright (C) 2005 Mike Christie. All rights reserved. * Copyright (C) Chandra Seetharaman, IBM Corp. 
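The retuned constants in alua_rtpg() change the poll cadence while a target port group reports TRANSITIONING: the old code started its first sleep at 100 ms and multiplied by 10 thereafter (1 s, then 10 s), while the new code doubles from 2 ms, so short transitions are noticed almost immediately and ALUA_FAILOVER_TIMEOUT still bounds the loop. The resulting shape of the retry path, sketched:

    unsigned long expiry, interval = 1;

    expiry = round_jiffies_up(jiffies + ALUA_FAILOVER_TIMEOUT);
    retry:
            /* ...issue REPORT TARGET PORT GROUPS and parse the state... */
            if (h->state == TPGS_STATE_TRANSITIONING) {
                    if (time_before(jiffies, expiry)) {
                            interval *= 2;   /* sleeps 2 ms, 4 ms, 8 ms, ... */
                            msleep(interval);
                            goto retry;
                    }
                    /* the transition outlived the failover window: give up */
            }
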
2007 @@ -795,6 +795,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"IBM", "3526"}, {"SGI", "TP9400"}, {"SGI", "TP9500"}, + {"SGI", "TP9700"}, {"SGI", "IS"}, {"STK", "OPENstorage D280"}, {"SUN", "CSM200_R"}, @@ -814,6 +815,7 @@ static const struct scsi_dh_devlist rdac_dev_list[] = { {"SUN", "CSM100_R_FC"}, {"SUN", "STK6580_6780"}, {"SUN", "SUN_6180"}, + {"SUN", "ArrayStorage"}, {NULL, NULL}, }; @@ -945,7 +947,7 @@ static void __exit rdac_exit(void) module_init(rdac_init); module_exit(rdac_exit); -MODULE_DESCRIPTION("Multipath LSI/Engenio RDAC driver"); +MODULE_DESCRIPTION("Multipath LSI/Engenio/NetApp E-Series RDAC driver"); MODULE_AUTHOR("Mike Christie, Chandra Seetharaman"); MODULE_VERSION("01.00.0000.0000"); MODULE_LICENSE("GPL"); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index a1c0ddd53aa9..61384ee4049b 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -51,7 +51,7 @@ MODULE_DESCRIPTION("FCoE"); MODULE_LICENSE("GPL v2"); /* Performance tuning parameters for fcoe */ -static unsigned int fcoe_ddp_min; +static unsigned int fcoe_ddp_min = 4096; module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for " \ "Direct Data Placement (DDP)."); @@ -137,7 +137,6 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled); static int fcoe_vport_disable(struct fc_vport *, bool disable); static void fcoe_set_vport_symbolic_name(struct fc_vport *); static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); -static int fcoe_validate_vport_create(struct fc_vport *); static struct libfc_function_template fcoe_libfc_fcn_templ = { .frame_send = fcoe_xmit, @@ -280,6 +279,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe, * use the first one for SPMA */ real_dev = (netdev->priv_flags & IFF_802_1Q_VLAN) ? 
vlan_dev_real_dev(netdev) : netdev; + fcoe->realdev = real_dev; rcu_read_lock(); for_each_dev_addr(real_dev, ha) { if ((ha->type == NETDEV_HW_ADDR_T_SAN) && @@ -580,23 +580,6 @@ static int fcoe_lport_config(struct fc_lport *lport) } /** - * fcoe_get_wwn() - Get the world wide name from LLD if it supports it - * @netdev: the associated net device - * @wwn: the output WWN - * @type: the type of WWN (WWPN or WWNN) - * - * Returns: 0 for success - */ -static int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) -{ - const struct net_device_ops *ops = netdev->netdev_ops; - - if (ops->ndo_fcoe_get_wwn) - return ops->ndo_fcoe_get_wwn(netdev, wwn, type); - return -EINVAL; -} - -/** * fcoe_netdev_features_change - Updates the lport's offload flags based * on the LLD netdev's FCoE feature flags */ @@ -1134,8 +1117,9 @@ static void fcoe_percpu_thread_create(unsigned int cpu) p = &per_cpu(fcoe_percpu, cpu); - thread = kthread_create(fcoe_percpu_receive_thread, - (void *)p, "fcoethread/%d", cpu); + thread = kthread_create_on_node(fcoe_percpu_receive_thread, + (void *)p, cpu_to_node(cpu), + "fcoethread/%d", cpu); if (likely(!IS_ERR(thread))) { kthread_bind(thread, cpu); @@ -1538,7 +1522,13 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp) skb_reset_network_header(skb); skb->mac_len = elen; skb->protocol = htons(ETH_P_FCOE); - skb->dev = fcoe->netdev; + if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN && + fcoe->realdev->features & NETIF_F_HW_VLAN_TX) { + skb->vlan_tci = VLAN_TAG_PRESENT | + vlan_dev_vlan_id(fcoe->netdev); + skb->dev = fcoe->realdev; + } else + skb->dev = fcoe->netdev; /* fill up mac and fcoe headers */ eh = eth_hdr(skb); @@ -2446,7 +2436,7 @@ static int fcoe_vport_create(struct fc_vport *vport, bool disabled) rc = fcoe_validate_vport_create(vport); if (rc) { - wwn_to_str(vport->port_name, buf, sizeof(buf)); + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); printk(KERN_ERR "fcoe: Failed to create vport, " "WWPN (0x%s) already exists\n", buf); @@ -2555,28 +2545,9 @@ static void fcoe_set_vport_symbolic_name(struct fc_vport *vport) static void fcoe_get_lesb(struct fc_lport *lport, struct fc_els_lesb *fc_lesb) { - unsigned int cpu; - u32 lfc, vlfc, mdac; - struct fcoe_dev_stats *devst; - struct fcoe_fc_els_lesb *lesb; - struct rtnl_link_stats64 temp; struct net_device *netdev = fcoe_netdev(lport); - lfc = 0; - vlfc = 0; - mdac = 0; - lesb = (struct fcoe_fc_els_lesb *)fc_lesb; - memset(lesb, 0, sizeof(*lesb)); - for_each_possible_cpu(cpu) { - devst = per_cpu_ptr(lport->dev_stats, cpu); - lfc += devst->LinkFailureCount; - vlfc += devst->VLinkFailureCount; - mdac += devst->MissDiscAdvCount; - } - lesb->lesb_link_fail = htonl(lfc); - lesb->lesb_vlink_fail = htonl(vlfc); - lesb->lesb_miss_fka = htonl(mdac); - lesb->lesb_fcs_error = htonl(dev_get_stats(netdev, &temp)->rx_crc_errors); + __fcoe_get_lesb(lport, fc_lesb, netdev); } /** @@ -2600,49 +2571,3 @@ static void fcoe_set_port_id(struct fc_lport *lport, if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); } - -/** - * fcoe_validate_vport_create() - Validate a vport before creating it - * @vport: NPIV port to be created - * - * This routine is meant to add validation for a vport before creating it - * via fcoe_vport_create(). 
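This hunk is what the fcoe->realdev pointer cached in fcoe_interface_setup() above exists for: when the FCoE instance sits on a VLAN device whose underlying port can insert 802.1Q tags in hardware, fcoe_xmit() stamps the VID into the skb and queues the frame directly on the real device; otherwise it transmits through the VLAN device as before. With explanatory comments:

    if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
        fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
            skb->vlan_tci = VLAN_TAG_PRESENT | vlan_dev_vlan_id(fcoe->netdev);
            skb->dev = fcoe->realdev;   /* NIC inserts the tag on the wire */
    } else {
            skb->dev = fcoe->netdev;    /* software VLAN tagging path */
    }
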
- * Current validations are: - * - WWPN supplied is unique for given lport - * - * -*/ -static int fcoe_validate_vport_create(struct fc_vport *vport) -{ - struct Scsi_Host *shost = vport_to_shost(vport); - struct fc_lport *n_port = shost_priv(shost); - struct fc_lport *vn_port; - int rc = 0; - char buf[32]; - - mutex_lock(&n_port->lp_mutex); - - wwn_to_str(vport->port_name, buf, sizeof(buf)); - /* Check if the wwpn is not same as that of the lport */ - if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) { - FCOE_DBG("vport WWPN 0x%s is same as that of the " - "base port WWPN\n", buf); - rc = -EINVAL; - goto out; - } - - /* Check if there is any existing vport with same wwpn */ - list_for_each_entry(vn_port, &n_port->vports, list) { - if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) { - FCOE_DBG("vport with given WWPN 0x%s already " - "exists\n", buf); - rc = -EINVAL; - break; - } - } - -out: - mutex_unlock(&n_port->lp_mutex); - - return rc; -} diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h index c4a93993c0cf..6c6884bcf840 100644 --- a/drivers/scsi/fcoe/fcoe.h +++ b/drivers/scsi/fcoe/fcoe.h @@ -80,6 +80,7 @@ do { \ struct fcoe_interface { struct list_head list; struct net_device *netdev; + struct net_device *realdev; struct packet_type fcoe_packet_type; struct packet_type fip_packet_type; struct fcoe_ctlr ctlr; @@ -99,14 +100,4 @@ static inline struct net_device *fcoe_netdev(const struct fc_lport *lport) ((struct fcoe_port *)lport_priv(lport))->priv)->netdev; } -static inline void wwn_to_str(u64 wwn, char *buf, int len) -{ - u8 wwpn[8]; - - u64_to_wwn(wwn, wwpn); - snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x", - wwpn[0], wwpn[1], wwpn[2], wwpn[3], - wwpn[4], wwpn[5], wwpn[6], wwpn[7]); -} - #endif /* _FCOE_H_ */ diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index dac8e39a5188..bd97b2273f20 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -83,6 +83,107 @@ static struct notifier_block libfcoe_notifier = { .notifier_call = libfcoe_device_notification, }; +void __fcoe_get_lesb(struct fc_lport *lport, + struct fc_els_lesb *fc_lesb, + struct net_device *netdev) +{ + unsigned int cpu; + u32 lfc, vlfc, mdac; + struct fcoe_dev_stats *devst; + struct fcoe_fc_els_lesb *lesb; + struct rtnl_link_stats64 temp; + + lfc = 0; + vlfc = 0; + mdac = 0; + lesb = (struct fcoe_fc_els_lesb *)fc_lesb; + memset(lesb, 0, sizeof(*lesb)); + for_each_possible_cpu(cpu) { + devst = per_cpu_ptr(lport->dev_stats, cpu); + lfc += devst->LinkFailureCount; + vlfc += devst->VLinkFailureCount; + mdac += devst->MissDiscAdvCount; + } + lesb->lesb_link_fail = htonl(lfc); + lesb->lesb_vlink_fail = htonl(vlfc); + lesb->lesb_miss_fka = htonl(mdac); + lesb->lesb_fcs_error = + htonl(dev_get_stats(netdev, &temp)->rx_crc_errors); +} +EXPORT_SYMBOL_GPL(__fcoe_get_lesb); + +void fcoe_wwn_to_str(u64 wwn, char *buf, int len) +{ + u8 wwpn[8]; + + u64_to_wwn(wwn, wwpn); + snprintf(buf, len, "%02x%02x%02x%02x%02x%02x%02x%02x", + wwpn[0], wwpn[1], wwpn[2], wwpn[3], + wwpn[4], wwpn[5], wwpn[6], wwpn[7]); +} +EXPORT_SYMBOL_GPL(fcoe_wwn_to_str); + +/** + * fcoe_validate_vport_create() - Validate a vport before creating it + * @vport: NPIV port to be created + * + * This routine is meant to add validation for a vport before creating it + * via fcoe_vport_create(). 
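fcoe.h drops its private wwn_to_str() in favour of fcoe_wwn_to_str(), implemented and exported by fcoe_transport.c above, so fcoe and bnx2fc (whose vport-create path earlier in this diff already calls it) format WWPNs identically. Usage is as simple as it looks:

    char buf[32];

    /* renders the 8 WWN bytes as 16 hex digits,
     * e.g. 0x50060b0000c26204 -> "50060b0000c26204" */
    fcoe_wwn_to_str(lport->wwpn, buf, sizeof(buf));
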
+ * Current validations are: + * - WWPN supplied is unique for given lport + */ +int fcoe_validate_vport_create(struct fc_vport *vport) +{ + struct Scsi_Host *shost = vport_to_shost(vport); + struct fc_lport *n_port = shost_priv(shost); + struct fc_lport *vn_port; + int rc = 0; + char buf[32]; + + mutex_lock(&n_port->lp_mutex); + + fcoe_wwn_to_str(vport->port_name, buf, sizeof(buf)); + /* Check if the wwpn is not same as that of the lport */ + if (!memcmp(&n_port->wwpn, &vport->port_name, sizeof(u64))) { + LIBFCOE_TRANSPORT_DBG("vport WWPN 0x%s is same as that of the " + "base port WWPN\n", buf); + rc = -EINVAL; + goto out; + } + + /* Check if there is any existing vport with same wwpn */ + list_for_each_entry(vn_port, &n_port->vports, list) { + if (!memcmp(&vn_port->wwpn, &vport->port_name, sizeof(u64))) { + LIBFCOE_TRANSPORT_DBG("vport with given WWPN 0x%s " + "already exists\n", buf); + rc = -EINVAL; + break; + } + } +out: + mutex_unlock(&n_port->lp_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(fcoe_validate_vport_create); + +/** + * fcoe_get_wwn() - Get the world wide name from LLD if it supports it + * @netdev: the associated net device + * @wwn: the output WWN + * @type: the type of WWN (WWPN or WWNN) + * + * Returns: 0 for success + */ +int fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) +{ + const struct net_device_ops *ops = netdev->netdev_ops; + + if (ops->ndo_fcoe_get_wwn) + return ops->ndo_fcoe_get_wwn(netdev, wwn, type); + return -EINVAL; +} +EXPORT_SYMBOL_GPL(fcoe_get_wwn); + /** * fcoe_fc_crc() - Calculates the CRC for a given frame * @fp: The frame to be checksumed diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index b200b736b000..9825ecf34957 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -3438,10 +3438,8 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev) } else { use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; if (use_doorbell) { - dev_warn(&pdev->dev, "Controller claims that " - "'Bit 2 doorbell reset' is " - "supported, but not 'bit 5 doorbell reset'. " - "Firmware update is recommended.\n"); + dev_warn(&pdev->dev, "Soft reset not supported. 
" + "Firmware update is required.\n"); rc = -ENOTSUPP; /* try soft reset */ goto unmap_cfgtable; } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 8d636301e32c..73e24b48dced 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -2901,7 +2901,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump) spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (ioa_cfg->sdt_state != GET_DUMP) { + if (ioa_cfg->sdt_state != READ_DUMP) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; } @@ -3097,7 +3097,7 @@ static void ipr_worker_thread(struct work_struct *work) ENTER; spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (ioa_cfg->sdt_state == GET_DUMP) { + if (ioa_cfg->sdt_state == READ_DUMP) { dump = ioa_cfg->dump; if (!dump) { spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); @@ -3109,7 +3109,7 @@ static void ipr_worker_thread(struct work_struct *work) kref_put(&dump->kref, ipr_release_dump); spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); - if (ioa_cfg->sdt_state == DUMP_OBTAINED) + if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout) ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE); spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); return; @@ -3751,14 +3751,6 @@ static ssize_t ipr_store_update_fw(struct device *dev, image_hdr = (struct ipr_ucode_image_header *)fw_entry->data; - if (be32_to_cpu(image_hdr->header_length) > fw_entry->size || - (ioa_cfg->vpd_cbs->page3_data.card_type && - ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) { - dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n"); - release_firmware(fw_entry); - return -EINVAL; - } - src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length); dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length); sglist = ipr_alloc_ucode_buffer(dnld_size); @@ -3777,6 +3769,8 @@ static ssize_t ipr_store_update_fw(struct device *dev, goto out; } + ipr_info("Updating microcode, please be patient. 
This may take up to 30 minutes.\n"); + result = ipr_update_ioa_ucode(ioa_cfg, sglist); if (!result) @@ -7449,8 +7443,11 @@ static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd) struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; if (ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + else if (ioa_cfg->sdt_state == READ_DUMP) ioa_cfg->sdt_state = ABORT_DUMP; + ioa_cfg->dump_timeout = 1; ipr_cmd->job_step = ipr_reset_alert; return IPR_RC_JOB_CONTINUE; @@ -7614,6 +7611,8 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) ipr_cmd->job_step = ipr_reset_enable_ioa; if (GET_DUMP == ioa_cfg->sdt_state) { + ioa_cfg->sdt_state = READ_DUMP; + ioa_cfg->dump_timeout = 0; if (ioa_cfg->sis64) ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT); else @@ -8003,8 +8002,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg, if (ioa_cfg->ioa_is_dead) return; - if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP) - ioa_cfg->sdt_state = ABORT_DUMP; + if (ioa_cfg->in_reset_reload) { + if (ioa_cfg->sdt_state == GET_DUMP) + ioa_cfg->sdt_state = WAIT_FOR_DUMP; + else if (ioa_cfg->sdt_state == READ_DUMP) + ioa_cfg->sdt_state = ABORT_DUMP; + } if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) { dev_err(&ioa_cfg->pdev->dev, @@ -8812,7 +8815,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev, uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32); if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT)) ioa_cfg->needs_hard_reset = 1; - if (interrupts & IPR_PCII_ERROR_INTERRUPTS) + if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices) ioa_cfg->needs_hard_reset = 1; if (interrupts & IPR_PCII_IOA_UNIT_CHECKED) ioa_cfg->ioa_unit_checked = 1; diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h index f93f8637c5a1..6d257e0dd6a5 100644 --- a/drivers/scsi/ipr.h +++ b/drivers/scsi/ipr.h @@ -208,7 +208,7 @@ #define IPR_CANCEL_ALL_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) #define IPR_ABORT_TASK_TIMEOUT (ipr_fastfail ? 10 * HZ : 30 * HZ) #define IPR_INTERNAL_TIMEOUT (ipr_fastfail ? 
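Taken together, the ipr hunks split the old GET_DUMP state in two: GET_DUMP now only records that a dump was requested, while READ_DUMP (added to the enum below) is entered by ipr_reset_restore_cfg_space() once the adapter is quiesced, and is the only state in which the worker thread and ipr_get_ioa_dump() will actually read the dump out. One reading of the timeout path above, with the intent spelled out in comments (an interpretation of the patch, not text from it):

    if (ioa_cfg->sdt_state == GET_DUMP)
            ioa_cfg->sdt_state = WAIT_FOR_DUMP;  /* not started: re-request later */
    else if (ioa_cfg->sdt_state == READ_DUMP)
            ioa_cfg->sdt_state = ABORT_DUMP;     /* interrupted mid-read: discard */
    ioa_cfg->dump_timeout = 1;  /* and skip the automatic post-dump reset */
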
10 * HZ : 30 * HZ) -#define IPR_WRITE_BUFFER_TIMEOUT (10 * 60 * HZ) +#define IPR_WRITE_BUFFER_TIMEOUT (30 * 60 * HZ) #define IPR_SET_SUP_DEVICE_TIMEOUT (2 * 60 * HZ) #define IPR_REQUEST_SENSE_TIMEOUT (10 * HZ) #define IPR_OPERATIONAL_TIMEOUT (5 * 60) @@ -1360,6 +1360,7 @@ enum ipr_sdt_state { INACTIVE, WAIT_FOR_DUMP, GET_DUMP, + READ_DUMP, ABORT_DUMP, DUMP_OBTAINED }; @@ -1384,6 +1385,7 @@ struct ipr_ioa_cfg { u8 needs_warm_reset:1; u8 msi_received:1; u8 sis64:1; + u8 dump_timeout:1; u8 revid; diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 6981b773a88d..f07f30fada1b 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -1263,6 +1263,10 @@ void isci_host_deinit(struct isci_host *ihost) { int i; + /* disable output data selects */ + for (i = 0; i < isci_gpio_count(ihost); i++) + writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); + isci_host_change_state(ihost, isci_stopping); for (i = 0; i < SCI_MAX_PORTS; i++) { struct isci_port *iport = &ihost->ports[i]; @@ -1281,6 +1285,12 @@ void isci_host_deinit(struct isci_host *ihost) spin_unlock_irq(&ihost->scic_lock); wait_for_stop(ihost); + + /* disable sgpio: where the above wait should give time for the + * enclosure to sample the gpios going inactive + */ + writel(0, &ihost->scu_registers->peg0.sgpio.interface_control); + sci_controller_reset(ihost); /* Cancel any/all outstanding port timers */ @@ -2365,6 +2375,12 @@ int isci_host_init(struct isci_host *ihost) for (i = 0; i < SCI_MAX_PHYS; i++) isci_phy_init(&ihost->phys[i], ihost, i); + /* enable sgpio */ + writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); + for (i = 0; i < isci_gpio_count(ihost); i++) + writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); + writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); + for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { struct isci_remote_device *idev = &ihost->devices[i]; @@ -2760,3 +2776,56 @@ enum sci_task_status sci_controller_start_task(struct isci_host *ihost, return status; } + +static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index, u8 reg_count, u8 *write_data) +{ + int d; + + /* no support for TX_GP_CFG */ + if (reg_index == 0) + return -EINVAL; + + for (d = 0; d < isci_gpio_count(ihost); d++) { + u32 val = 0x444; /* all ODx.n clear */ + int i; + + for (i = 0; i < 3; i++) { + int bit = (i << 2) + 2; + + bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i), + write_data, reg_index, + reg_count); + if (bit < 0) + break; + + /* if od is set, clear the 'invert' bit */ + val &= ~(bit << ((i << 2) + 2)); + } + + if (i < 3) + break; + writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]); + } + + /* unless reg_index is > 1, we should always be able to write at + * least one register + */ + return d > 0; +} + +int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index, + u8 reg_count, u8 *write_data) +{ + struct isci_host *ihost = sas_ha->lldd_ha; + int written; + + switch (reg_type) { + case SAS_GPIO_REG_TX_GP: + written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data); + break; + default: + written = -EINVAL; + } + + return written; +} diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h index 9f33831a2f04..646051afd3cb 100644 --- a/drivers/scsi/isci/host.h +++ b/drivers/scsi/isci/host.h @@ -440,6 +440,18 @@ static inline bool is_c0(struct pci_dev *pdev) return false; } +/* set hw control for 'activity', even though active enclosures seem to drive + * 
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 9f33831a2f04..646051afd3cb 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -440,6 +440,18 @@ static inline bool is_c0(struct pci_dev *pdev)
 	return false;
 }
 
+/* set hw control for 'activity', even though active enclosures seem to drive
+ * the activity led on their own. Skip setting FSENG control on 'status' due
+ * to unexpected operation and 'error' due to not being a supported automatic
+ * FSENG output
+ */
+#define SGPIO_HW_CONTROL 0x00000443
+
+static inline int isci_gpio_count(struct isci_host *ihost)
+{
+	return ARRAY_SIZE(ihost->scu_registers->peg0.sgpio.output_data_select);
+}
+
 void sci_controller_post_request(struct isci_host *ihost,
 				 u32 request);
 void sci_controller_release_frame(struct isci_host *ihost,
@@ -542,4 +554,7 @@ void sci_port_configuration_agent_construct(
 enum sci_status sci_port_configuration_agent_initialize(
 	struct isci_host *ihost,
 	struct sci_port_configuration_agent *port_agent);
+
+int isci_gpio_write(struct sas_ha_struct *, u8 reg_type, u8 reg_index,
+		    u8 reg_count, u8 *write_data);
 #endif
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 29aa34efb0f5..43fe840fbe9c 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -192,6 +192,9 @@ static struct sas_domain_function_template isci_transport_ops = {
 
 	/* Phy management */
 	.lldd_control_phy = isci_phy_control,
+
+	/* GPIO support */
+	.lldd_write_gpio = isci_gpio_write,
 };
diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h
index d1de63312e7f..8efeb6b08321 100644
--- a/drivers/scsi/isci/isci.h
+++ b/drivers/scsi/isci/isci.h
@@ -97,7 +97,7 @@
 #define SCU_MAX_COMPLETION_QUEUE_SHIFT      (ilog2(SCU_MAX_COMPLETION_QUEUE_ENTRIES))
 
 #define SCU_ABSOLUTE_MAX_UNSOLICITED_FRAMES (4096)
-#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE   (1024)
+#define SCU_UNSOLICITED_FRAME_BUFFER_SIZE   (1024U)
 #define SCU_INVALID_FRAME_INDEX             (0xFFFF)
 
 #define SCU_IO_REQUEST_MAX_SGE_SIZE         (0x00FFFFFF)
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 09e61134037f..35f50c2183e1 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -1313,6 +1313,17 @@ int isci_phy_control(struct asd_sas_phy *sas_phy,
 		ret = isci_port_perform_hard_reset(ihost, iport, iphy);
 		break;
+	case PHY_FUNC_GET_EVENTS: {
+		struct scu_link_layer_registers __iomem *r;
+		struct sas_phy *phy = sas_phy->phy;
+
+		r = iphy->link_layer_registers;
+		phy->running_disparity_error_count = readl(&r->running_disparity_error_count);
+		phy->loss_of_dword_sync_count = readl(&r->loss_of_sync_error_count);
+		phy->phy_reset_problem_count = readl(&r->phy_reset_problem_count);
+		phy->invalid_dword_count = readl(&r->invalid_dword_counter);
+		break;
+	}
 	default:
 		dev_dbg(&ihost->pdev->dev,
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 8f6f9b77e41a..8e59c8865dcd 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -294,8 +294,8 @@ static void isci_port_link_down(struct isci_host *isci_host,
 				  __func__, isci_device);
 			set_bit(IDEV_GONE, &isci_device->flags);
 		}
+		isci_port_change_state(isci_port, isci_stopping);
 	}
-	isci_port_change_state(isci_port, isci_stopping);
 }
 
 /* Notify libsas of the broken link, this will trigger calls to our
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 486b113c634a..38a99d281141 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -678,7 +678,7 @@ static void apc_agent_timeout(unsigned long data)
 	configure_phy_mask = ~port_agent->phy_configured_mask & port_agent->phy_ready_mask;
 
 	if (!configure_phy_mask)
-		return;
+		goto done;
 
 	for (index = 0; index < SCI_MAX_PHYS; index++) {
 		if ((configure_phy_mask & (1 << index)) == 0)
diff --git a/drivers/scsi/isci/registers.h b/drivers/scsi/isci/registers.h
index 00afc738bbed..eaa541afc755
100644 --- a/drivers/scsi/isci/registers.h +++ b/drivers/scsi/isci/registers.h @@ -875,122 +875,6 @@ struct scu_iit_entry { #define SCU_PTSxSR_GEN_BIT(name) \ SCU_GEN_BIT(SCU_PTSG_PORT_TASK_SCHEDULER_STATUS_ ## name) - -/* - * ***************************************************************************** - * * SGPIO Register shift and mask values - * ***************************************************************************** */ -#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_SHIFT (0) -#define SCU_SGPIO_CONTROL_SGPIO_ENABLE_MASK (0x00000001) -#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_SHIFT (1) -#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_CLOCK_SELECT_MASK (0x00000002) -#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_SHIFT (2) -#define SCU_SGPIO_CONTROL_SGPIO_SERIAL_SHIFT_WIDTH_SELECT_MASK (0x00000004) -#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_SHIFT (15) -#define SCU_SGPIO_CONTROL_SGPIO_TEST_BIT_MASK (0x00008000) -#define SCU_SGPIO_CONTROL_SGPIO_RESERVED_MASK (0xFFFF7FF8) - -#define SCU_SGICRx_GEN_BIT(name) \ - SCU_GEN_BIT(SCU_SGPIO_CONTROL_SGPIO_ ## name) - -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_SHIFT (0) -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R0_MASK (0x0000000F) -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_SHIFT (4) -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R1_MASK (0x000000F0) -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_SHIFT (8) -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R2_MASK (0x00000F00) -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_SHIFT (12) -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_R3_MASK (0x0000F000) -#define SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_RESERVED_MASK (0xFFFF0000) - -#define SCU_SGPBRx_GEN_VAL(name, value) \ - SCU_GEN_VALUE(SCU_SGPIO_PROGRAMMABLE_BLINK_REGISTER_ ## name, value) - -#define SCU_SGPIO_START_DRIVE_LOWER_R0_SHIFT (0) -#define SCU_SGPIO_START_DRIVE_LOWER_R0_MASK (0x00000003) -#define SCU_SGPIO_START_DRIVE_LOWER_R1_SHIFT (4) -#define SCU_SGPIO_START_DRIVE_LOWER_R1_MASK (0x00000030) -#define SCU_SGPIO_START_DRIVE_LOWER_R2_SHIFT (8) -#define SCU_SGPIO_START_DRIVE_LOWER_R2_MASK (0x00000300) -#define SCU_SGPIO_START_DRIVE_LOWER_R3_SHIFT (12) -#define SCU_SGPIO_START_DRIVE_LOWER_R3_MASK (0x00003000) -#define SCU_SGPIO_START_DRIVE_LOWER_RESERVED_MASK (0xFFFF8888) - -#define SCU_SGSDLRx_GEN_VAL(name, value) \ - SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value) - -#define SCU_SGPIO_START_DRIVE_UPPER_R0_SHIFT (0) -#define SCU_SGPIO_START_DRIVE_UPPER_R0_MASK (0x00000003) -#define SCU_SGPIO_START_DRIVE_UPPER_R1_SHIFT (4) -#define SCU_SGPIO_START_DRIVE_UPPER_R1_MASK (0x00000030) -#define SCU_SGPIO_START_DRIVE_UPPER_R2_SHIFT (8) -#define SCU_SGPIO_START_DRIVE_UPPER_R2_MASK (0x00000300) -#define SCU_SGPIO_START_DRIVE_UPPER_R3_SHIFT (12) -#define SCU_SGPIO_START_DRIVE_UPPER_R3_MASK (0x00003000) -#define SCU_SGPIO_START_DRIVE_UPPER_RESERVED_MASK (0xFFFF8888) - -#define SCU_SGSDURx_GEN_VAL(name, value) \ - SCU_GEN_VALUE(SCU_SGPIO_START_DRIVE_LOWER_ ## name, value) - -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_SHIFT (0) -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D0_MASK (0x00000003) -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_SHIFT (4) -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D1_MASK (0x00000030) -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_SHIFT (8) -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D2_MASK (0x00000300) -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_SHIFT (12) -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_D3_MASK (0x00003000) -#define SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_RESERVED_MASK (0xFFFF8888) - 
-#define SCU_SGSIDLRx_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
-
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_SHIFT (0)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D0_MASK (0x00000003)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_SHIFT (4)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D1_MASK (0x00000030)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_SHIFT (8)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D2_MASK (0x00000300)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_SHIFT (12)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_D3_MASK (0x00003000)
-#define SCU_SGPIO_SERIAL_INPUT_DATA_UPPER_RESERVED_MASK (0xFFFF8888)
-
-#define SCU_SGSIDURx_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_SERIAL_INPUT_DATA_LOWER_ ## name, value)
-
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_SHIFT (0)
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_MASK (0x0000000F)
-#define SCU_SGPIO_VENDOR_SPECIFIC_CODE_RESERVED_MASK (0xFFFFFFF0)
-
-#define SCU_SGVSCR_GEN_VAL(value) \
-	SCU_GEN_VALUE(SCU_SGPIO_VENDOR_SPECIFIC_CODE ## name, value)
-
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_SHIFT (0)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA0_MASK (0x00000003)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_SHIFT (2)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA0_MASK (0x00000004)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_SHIFT (3)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA0_MASK (0x00000008)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_SHIFT (4)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA1_MASK (0x00000030)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_SHIFT (6)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA1_MASK (0x00000040)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_SHIFT (7)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA1_MASK (0x00000080)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_SHIFT (8)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INPUT_DATA2_MASK (0x00000300)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_SHIFT (10)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_INVERT_INPUT_DATA2_MASK (0x00000400)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_SHIFT (11)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_JOG_ENABLE_DATA2_MASK (0x00000800)
-#define SCU_SGPIO_OUPUT_DATA_SELECT_RESERVED_MASK (0xFFFFF000)
-
-#define SCU_SGODSR_GEN_VAL(name, value) \
-	SCU_GEN_VALUE(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name, value)
-
-#define SCU_SGODSR_GEN_BIT(name) \
-	SCU_GEN_BIT(SCU_SGPIO_OUPUT_DATA_SELECT_ ## name)
-
 /*
  * *****************************************************************************
  * * SMU Registers
@@ -1529,10 +1413,12 @@ struct scu_sgpio_registers {
 	u32 serial_input_upper;
 /* 0x0018 SGPIO_SGVSCR */
 	u32 vendor_specific_code;
+/* 0x001C Reserved */
+	u32 reserved_001c;
 /* 0x0020 SGPIO_SGODSR */
-	u32 ouput_data_select[8];
+	u32 output_data_select[8];
 /* Remainder of memory space 256 bytes */
-	u32 reserved_1444_14ff[0x31];
+	u32 reserved_1444_14ff[0x30];
 };
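Two layout details motivate the struct hunk above: without the new reserved_001c pad word, output_data_select[] would land at offset 0x001C instead of its documented 0x0020, and the trailing pad must shrink from 0x31 to 0x30 words so the block still spans 256 bytes. A hedged standalone C sketch checks that arithmetic; the demo struct collapses the fields before SGVSCR into one array on the assumption that they occupy offsets 0x0000-0x0017, as the inline comments indicate.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* demo_sgpio mirrors only the tail of scu_sgpio_registers touched above */
struct demo_sgpio {
	uint32_t head[6];                  /* 0x0000-0x0017 (assumed) */
	uint32_t vendor_specific_code;     /* 0x0018 SGPIO_SGVSCR */
	uint32_t reserved_001c;            /* 0x001C reserved: the new pad */
	uint32_t output_data_select[8];    /* 0x0020 SGPIO_SGODSR */
	uint32_t reserved_1444_14ff[0x30]; /* pad the block out to 256 bytes */
};

int main(void)
{
	/* the pad word keeps SGODSR at its documented offset ... */
	assert(offsetof(struct demo_sgpio, output_data_select) == 0x20);
	/* ... and the one-word-smaller tail keeps the block at 256 bytes */
	assert(sizeof(struct demo_sgpio) == 256);
	printf("layout ok\n");
	return 0;
}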
diff --git a/drivers/scsi/isci/remote_device.c b/drivers/scsi/isci/remote_device.c
index b6e6368c2665..fbf9ce28c3f5 100644
--- a/drivers/scsi/isci/remote_device.c
+++ b/drivers/scsi/isci/remote_device.c
@@ -386,6 +386,18 @@ static bool is_remote_device_ready(struct isci_remote_device *idev)
 	}
 }
 
+/*
+ * called once the remote node context has transitioned to a ready
+ * state (after suspending RX and/or TX due to early D2H fis)
+ */
+static void atapi_remote_device_resume_done(void *_dev)
+{
+	struct isci_remote_device *idev = _dev;
+	struct isci_request *ireq = idev->working_request;
+
+	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+}
+
 enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
 						     u32 event_code)
 {
@@ -432,6 +444,16 @@ enum sci_status sci_remote_device_event_handler(struct isci_remote_device *idev,
 		if (status != SCI_SUCCESS)
 			return status;
 
+	if (state == SCI_STP_DEV_ATAPI_ERROR) {
+		/* For ATAPI error state resume the RNC right away. */
+		if (scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX ||
+		    scu_get_event_type(event_code) == SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX) {
+			return sci_remote_node_context_resume(&idev->rnc,
+							      atapi_remote_device_resume_done,
+							      idev);
+		}
+	}
+
 	if (state == SCI_STP_DEV_IDLE) {
 
 		/* We pick up suspension events to handle specifically to this
@@ -625,6 +647,7 @@ enum sci_status sci_remote_device_complete_io(struct isci_host *ihost,
 	case SCI_STP_DEV_CMD:
 	case SCI_STP_DEV_NCQ:
 	case SCI_STP_DEV_NCQ_ERROR:
+	case SCI_STP_DEV_ATAPI_ERROR:
 		status = common_complete_io(iport, idev, ireq);
 		if (status != SCI_SUCCESS)
 			break;
@@ -1020,6 +1043,7 @@ static const struct sci_base_state sci_remote_device_state_table[] = {
 	[SCI_STP_DEV_NCQ_ERROR] = {
 		.enter_state = sci_stp_remote_device_ready_ncq_error_substate_enter,
 	},
+	[SCI_STP_DEV_ATAPI_ERROR] = { },
 	[SCI_STP_DEV_AWAIT_RESET] = { },
 	[SCI_SMP_DEV_IDLE] = {
 		.enter_state = sci_smp_remote_device_ready_idle_substate_enter,
diff --git a/drivers/scsi/isci/remote_device.h b/drivers/scsi/isci/remote_device.h
index 57ccfc3d6ad3..e1747ea0d0ea 100644
--- a/drivers/scsi/isci/remote_device.h
+++ b/drivers/scsi/isci/remote_device.h
@@ -244,6 +244,15 @@ enum sci_remote_device_states {
 	SCI_STP_DEV_NCQ_ERROR,
 
 	/**
+	 * This is the ATAPI error state for the STP ATAPI remote device.
+	 * This state is entered when the ATAPI device sends an error status
+	 * FIS without data while the device object is in the CMD state.
+	 * A suspension event is expected in this state.
+	 * The device object will resume right away.
+	 */
+	SCI_STP_DEV_ATAPI_ERROR,
+
+	/**
	 * This READY substate indicates the device is waiting for the RESET task
	 * to recover from certain hardware specific errors.
 	 */
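The sequencing behind SCI_STP_DEV_ATAPI_ERROR above — hardware suspends the remote node context, the event handler resumes it immediately, and the resume callback completes the failed request — can be summarized in a toy model. This is a hedged sketch of the control flow only, in standalone C with invented demo_* names; it is not the driver's actual state machine.

#include <stdio.h>

enum demo_dev_state { DEMO_DEV_CMD, DEMO_DEV_ATAPI_ERROR };
enum demo_req_state { DEMO_REQ_STARTED, DEMO_REQ_COMPLETED };

struct demo_dev {
	enum demo_dev_state state;
	enum demo_req_state *working_req;
};

/* mirrors atapi_remote_device_resume_done(): complete the working request */
static void demo_resume_done(void *_dev)
{
	struct demo_dev *dev = _dev;

	*dev->working_req = DEMO_REQ_COMPLETED;
}

/* on an RNC suspend event in the error state, "resume the RNC right away" */
static void demo_suspend_event(struct demo_dev *dev)
{
	if (dev->state == DEMO_DEV_ATAPI_ERROR)
		demo_resume_done(dev);	/* the resume callback fires next */
}

int main(void)
{
	enum demo_req_state req = DEMO_REQ_STARTED;
	struct demo_dev dev = { DEMO_DEV_ATAPI_ERROR, &req };

	demo_suspend_event(&dev);
	printf("request %s\n", req == DEMO_REQ_COMPLETED ? "completed" : "pending");
	return 0;
}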
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index b5d3a8c4d329..565a9f0a9bc2 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -481,7 +481,29 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq,
 	}
 }
 
+static void sci_atapi_construct(struct isci_request *ireq)
+{
+	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
+	struct sas_task *task;
+
+	/* To simplify the implementation we take advantage of the
+	 * silicon's partial acceleration of atapi protocol (dma data
+	 * transfers), so we promote all commands to dma protocol. This
+	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
+	 */
+	h2d_fis->features |= ATAPI_PKT_DMA;
+
+	scu_stp_raw_request_construct_task_context(ireq);
+
+	task = isci_request_access_task(ireq);
+	if (task->data_dir == DMA_NONE)
+		task->total_xfer_len = 0;
+
+	/* clear the response so we can detect arrival of an
+	 * unsolicited h2d fis
+	 */
+	ireq->stp.rsp.fis_type = 0;
+}
 
 static enum sci_status
 sci_io_request_construct_sata(struct isci_request *ireq,
@@ -491,6 +513,7 @@ sci_io_request_construct_sata(struct isci_request *ireq,
 {
 	enum sci_status status = SCI_SUCCESS;
 	struct sas_task *task = isci_request_access_task(ireq);
+	struct domain_device *dev = ireq->target_device->domain_dev;
 
 	/* check for management protocols */
 	if (ireq->ttype == tmf_task) {
@@ -519,6 +542,13 @@ sci_io_request_construct_sata(struct isci_request *ireq,
 		}
 	}
 
+	/* ATAPI */
+	if (dev->sata_dev.command_set == ATAPI_COMMAND_SET &&
+	    task->ata_task.fis.command == ATA_CMD_PACKET) {
+		sci_atapi_construct(ireq);
+		return SCI_SUCCESS;
+	}
+
 	/* non data */
 	if (task->data_dir == DMA_NONE) {
 		scu_stp_raw_request_construct_task_context(ireq);
@@ -627,7 +657,7 @@ enum sci_status sci_task_request_construct_sata(struct isci_request *ireq)
 
 /**
  * sci_req_tx_bytes - bytes transferred when reply underruns request
- * @sci_req: request that was terminated early
+ * @ireq: request that was terminated early
 */
 #define SCU_TASK_CONTEXT_SRAM 0x200000
 static u32 sci_req_tx_bytes(struct isci_request *ireq)
@@ -729,6 +759,10 @@ sci_io_request_terminate(struct isci_request *ireq)
 	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED:
 	case SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG:
 	case SCI_REQ_STP_SOFT_RESET_WAIT_D2H:
+	case SCI_REQ_ATAPI_WAIT_H2D:
+	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
+	case SCI_REQ_ATAPI_WAIT_D2H:
+	case SCI_REQ_ATAPI_WAIT_TC_COMP:
 		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
 		return SCI_SUCCESS;
 	case SCI_REQ_TASK_WAIT_TC_RESP:
@@ -1194,8 +1228,8 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
 {
 	struct isci_stp_request *stp_req = &ireq->stp.req;
 	struct scu_sgl_element_pair *sgl_pair;
+	enum sci_status status = SCI_SUCCESS;
 	struct scu_sgl_element *sgl;
-	enum sci_status status;
 	u32 offset;
 	u32 len = 0;
@@ -1249,7 +1283,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re
 */
 static enum sci_status
 sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
-					     u8 *data_buf, u32 len)
+					      u8 *data_buf, u32 len)
 {
 	struct isci_request *ireq;
 	u8 *src_addr;
@@ -1423,6 +1457,128 @@ static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_re
 	return status;
 }
 
+static enum sci_status process_unsolicited_fis(struct isci_request *ireq,
+					       u32 frame_index)
+{
+	struct isci_host *ihost = ireq->owning_controller;
+	enum sci_status status;
+	struct dev_to_host_fis *frame_header;
+	u32 *frame_buffer;
+
+	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
+							  frame_index,
+							  (void **)&frame_header);
+
+	if (status != SCI_SUCCESS)
+		return status;
+
+	if (frame_header->fis_type != FIS_REGD2H) {
+		dev_err(&ireq->isci_host->pdev->dev,
+			"%s ERROR: invalid fis type 0x%X\n",
+			__func__, frame_header->fis_type);
+		return SCI_FAILURE;
+	}
+
+	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
+						 frame_index,
+						 (void **)&frame_buffer);
+
+	sci_controller_copy_sata_response(&ireq->stp.rsp,
+					  (u32 *)frame_header,
+					  frame_buffer);
+
+	/* Frame has been decoded return it to the controller */
+	sci_controller_release_frame(ihost, frame_index);
+
+	return status;
+}
+
+static enum sci_status
atapi_d2h_reg_frame_handler(struct isci_request *ireq, + u32 frame_index) +{ + struct sas_task *task = isci_request_access_task(ireq); + enum sci_status status; + + status = process_unsolicited_fis(ireq, frame_index); + + if (status == SCI_SUCCESS) { + if (ireq->stp.rsp.status & ATA_ERR) + status = SCI_IO_FAILURE_RESPONSE_VALID; + } else { + status = SCI_IO_FAILURE_RESPONSE_VALID; + } + + if (status != SCI_SUCCESS) { + ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; + ireq->sci_status = status; + } else { + ireq->scu_status = SCU_TASK_DONE_GOOD; + ireq->sci_status = SCI_SUCCESS; + } + + /* the d2h ufi is the end of non-data commands */ + if (task->data_dir == DMA_NONE) + sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); + + return status; +} + +static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) +{ + struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); + void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; + struct scu_task_context *task_context = ireq->tc; + + /* fill in the SCU Task Context for a DATA fis containing CDB in Raw Frame + * type. The TC for previous Packet fis was already there, we only need to + * change the H2D fis content. + */ + memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); + memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); + memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); + task_context->type.stp.fis_type = FIS_DATA; + task_context->transfer_length_bytes = dev->cdb_len; +} + +static void scu_atapi_construct_task_context(struct isci_request *ireq) +{ + struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev); + struct sas_task *task = isci_request_access_task(ireq); + struct scu_task_context *task_context = ireq->tc; + int cdb_len = dev->cdb_len; + + /* reference: SSTL 1.13.4.2 + * task_type, sata_direction + */ + if (task->data_dir == DMA_TO_DEVICE) { + task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; + task_context->sata_direction = 0; + } else { + /* todo: for NO_DATA command, we need to send out raw frame. */ + task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; + task_context->sata_direction = 1; + } + + memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); + task_context->type.stp.fis_type = FIS_DATA; + + memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); + memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); + task_context->ssp_command_iu_length = cdb_len / sizeof(u32); + + /* task phase is set to TX_CMD */ + task_context->task_phase = 0x1; + + /* retry counter */ + task_context->stp_retry_count = 0; + + /* data transfer size. */ + task_context->transfer_length_bytes = task->total_xfer_len; + + /* setup sgl */ + sci_request_build_sgl(ireq); +} + enum sci_status sci_io_request_frame_handler(struct isci_request *ireq, u32 frame_index) @@ -1490,29 +1646,30 @@ sci_io_request_frame_handler(struct isci_request *ireq, return SCI_SUCCESS; case SCI_REQ_SMP_WAIT_RESP: { - struct smp_resp *rsp_hdr = &ireq->smp.rsp; - void *frame_header; + struct sas_task *task = isci_request_access_task(ireq); + struct scatterlist *sg = &task->smp_task.smp_resp; + void *frame_header, *kaddr; + u8 *rsp; sci_unsolicited_frame_control_get_header(&ihost->uf_control, - frame_index, - &frame_header); - - /* byte swap the header. 
*/
-	word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
-	sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
+							 frame_index,
+							 &frame_header);
+		kaddr = kmap_atomic(sg_page(sg), KM_IRQ0);
+		rsp = kaddr + sg->offset;
+		sci_swab32_cpy(rsp, frame_header, 1);
 
-		if (rsp_hdr->frame_type == SMP_RESPONSE) {
+		if (rsp[0] == SMP_RESPONSE) {
 			void *smp_resp;
 
 			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
-								 frame_index,
-								 &smp_resp);
-
-			word_cnt = (sizeof(struct smp_resp) - SMP_RESP_HDR_SZ) /
-				sizeof(u32);
+								 frame_index,
+								 &smp_resp);
 
-			sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
-				       smp_resp, word_cnt);
+			word_cnt = (sg->length/4)-1;
+			if (word_cnt > 0)
+				word_cnt = min_t(unsigned int, word_cnt,
+						 SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4);
+			sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
 
 			ireq->scu_status = SCU_TASK_DONE_GOOD;
 			ireq->sci_status = SCI_SUCCESS;
@@ -1528,12 +1685,13 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 				 __func__,
 				 ireq,
 				 frame_index,
-				 rsp_hdr->frame_type);
+				 rsp[0]);
 
 			ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR;
 			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
 			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
 		}
+		kunmap_atomic(kaddr, KM_IRQ0);
 
 		sci_controller_release_frame(ihost, frame_index);
 
@@ -1833,6 +1991,24 @@ sci_io_request_frame_handler(struct isci_request *ireq,
 
 		return status;
 	}
+	case SCI_REQ_ATAPI_WAIT_PIO_SETUP: {
+		struct sas_task *task = isci_request_access_task(ireq);
+
+		sci_controller_release_frame(ihost, frame_index);
+		ireq->target_device->working_request = ireq;
+		if (task->data_dir == DMA_NONE) {
+			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
+			scu_atapi_reconstruct_raw_frame_task_context(ireq);
+		} else {
+			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
+			scu_atapi_construct_task_context(ireq);
+		}
+
+		sci_controller_continue_io(ireq);
+		return SCI_SUCCESS;
+	}
+	case SCI_REQ_ATAPI_WAIT_D2H:
+		return atapi_d2h_reg_frame_handler(ireq, frame_index);
 	case SCI_REQ_ABORTING:
 		/*
 		 * TODO: Is it even possible to get an unsolicited frame in the
@@ -1898,10 +2074,9 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
-	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
 		sci_remote_device_suspend(ireq->target_device,
 			SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
-	/* Fall through to the default case */
+		/* Fall through to the default case */
 	default:
 		/* All other completion status cause the IO to be complete. */
 		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
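The reworked SMP response path above byte-swaps the frame directly into the kmap'd scatterlist page with sci_swab32_cpy(). A plausible standalone C model of that helper — assuming it is a plain per-32-bit-word swab copy, which the call sites suggest — shows why the swap recovers the wire's first byte on a little-endian host:

#include <stdio.h>
#include <stdint.h>

/* Copy word_cnt 32-bit words, byte-swapping each one. */
static void demo_swab32_cpy(void *dst, const void *src, size_t word_cnt)
{
	uint32_t *d = dst;
	const uint32_t *s = src;

	while (word_cnt--) {
		uint32_t v = *s++;

		*d++ = (v >> 24) | ((v >> 8) & 0xff00) |
		       ((v << 8) & 0xff0000) | (v << 24);
	}
}

int main(void)
{
	/* on a little-endian host this stores bytes 00 00 02 41, i.e. the
	 * SMP_RESPONSE frame type (0x41) is in the last byte as received
	 */
	uint32_t frame = 0x41020000;
	uint32_t rsp;

	demo_swab32_cpy(&rsp, &frame, 1);
	printf("frame type 0x%02x\n", ((uint8_t *)&rsp)[0]); /* 0x41 */
	return 0;
}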
@@ -1964,6 +2139,112 @@ stp_request_soft_reset_await_h2d_diagnostic_tc_event(struct isci_request *ireq,
 
 	return SCI_SUCCESS;
 }
 
+static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
+					    enum sci_base_request_states next)
+{
+	enum sci_status status = SCI_SUCCESS;
+
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, next);
+		break;
+	default:
+		/* All other completion status cause the IO to be complete.
+		 * If a NAK was received, then it is up to the user to retry
+		 * the request.
+		 */
+		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
+		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
+
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+	}
+
+	return status;
+}
+
+static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
+							u32 completion_code)
+{
+	struct isci_remote_device *idev = ireq->target_device;
+	struct dev_to_host_fis *d2h = &ireq->stp.rsp;
+	enum sci_status status = SCI_SUCCESS;
+
+	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
+	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+
+	case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): {
+		u16 len = sci_req_tx_bytes(ireq);
+
+		/* likely non-error data underrun, workaround missing
+		 * d2h frame from the controller
+		 */
+		if (d2h->fis_type != FIS_REGD2H) {
+			d2h->fis_type = FIS_REGD2H;
+			d2h->flags = (1 << 6);
+			d2h->status = 0x50;
+			d2h->error = 0;
+			d2h->lbal = 0;
+			d2h->byte_count_low = len & 0xff;
+			d2h->byte_count_high = len >> 8;
+			d2h->device = 0xa0;
+			d2h->lbal_exp = 0;
+			d2h->lbam_exp = 0;
+			d2h->lbah_exp = 0;
+			d2h->_r_a = 0;
+			d2h->sector_count = 0x3;
+			d2h->sector_count_exp = 0;
+			d2h->_r_b = 0;
+			d2h->_r_c = 0;
+			d2h->_r_d = 0;
+		}
+
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
+		status = ireq->sci_status;
+
+		/* the hw will have suspended the rnc, so complete the
+		 * request upon pending resume
+		 */
+		sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
+		break;
+	}
+	case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT):
+		/* In this case, there is no UF coming after.
+		 * complete the IO now.
+		 */
+		ireq->scu_status = SCU_TASK_DONE_GOOD;
+		ireq->sci_status = SCI_SUCCESS;
+		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
+		break;
+
+	default:
+		if (d2h->fis_type == FIS_REGD2H) {
+			/* UF received change the device state to ATAPI_ERROR */
+			status = ireq->sci_status;
+			sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
+		} else {
+			/* If receiving any non-success TC status, no UF
+			 * received yet, then an UF for the status fis
+			 * is coming after (XXX: suspect this is
+			 * actually a protocol error or a bug like the
+			 * DONE_UNEXP_FIS case)
+			 */
+			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
+			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
+
+			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
+		}
+		break;
+	}
+
+	return status;
+}
+
 enum sci_status
 sci_io_request_tc_completion(struct isci_request *ireq,
 		u32 completion_code)
@@ -2015,6 +2296,17 @@ sci_io_request_tc_completion(struct isci_request *ireq,
 
 		return request_aborting_state_tc_event(ireq, completion_code);
 
+	case SCI_REQ_ATAPI_WAIT_H2D:
+		return atapi_raw_completion(ireq, completion_code,
+					    SCI_REQ_ATAPI_WAIT_PIO_SETUP);
+
+	case SCI_REQ_ATAPI_WAIT_TC_COMP:
+		return atapi_raw_completion(ireq, completion_code,
+					    SCI_REQ_ATAPI_WAIT_D2H);
+
+	case SCI_REQ_ATAPI_WAIT_D2H:
+		return atapi_data_tc_completion_handler(ireq, completion_code);
+
 	default:
 		dev_warn(&ihost->pdev->dev,
 			 "%s: SCIC IO Request given task completion "
@@ -2421,6 +2713,8 @@ static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_
 	 */
 	if (fis->status & ATA_DF)
 		ts->stat = SAS_PROTO_RESPONSE;
+	else if (fis->status & ATA_ERR)
+		ts->stat = SAM_STAT_CHECK_CONDITION;
 	else
 		ts->stat = SAM_STAT_GOOD;
 
@@ -2603,18 +2897,7 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
 			status = SAM_STAT_GOOD;
set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags); - if (task->task_proto == SAS_PROTOCOL_SMP) { - void *rsp = &request->smp.rsp; - - dev_dbg(&ihost->pdev->dev, - "%s: SMP protocol completion\n", - __func__); - - sg_copy_from_buffer( - &task->smp_task.smp_resp, 1, - rsp, sizeof(struct smp_resp)); - } else if (completion_status - == SCI_IO_SUCCESS_IO_DONE_EARLY) { + if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { /* This was an SSP / STP / SATA transfer. * There is a possibility that less data than @@ -2791,6 +3074,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) { struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); struct domain_device *dev = ireq->target_device->domain_dev; + enum sci_base_request_states state; struct sas_task *task; /* XXX as hch said always creating an internal sas_task for tmf @@ -2802,26 +3086,30 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm) * substates */ if (!task && dev->dev_type == SAS_END_DEV) { - sci_change_state(sm, SCI_REQ_TASK_WAIT_TC_COMP); + state = SCI_REQ_TASK_WAIT_TC_COMP; } else if (!task && (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high || isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) { - sci_change_state(sm, SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED); + state = SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED; } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { - sci_change_state(sm, SCI_REQ_SMP_WAIT_RESP); + state = SCI_REQ_SMP_WAIT_RESP; } else if (task && sas_protocol_ata(task->task_proto) && !task->ata_task.use_ncq) { - u32 state; - - if (task->data_dir == DMA_NONE) + if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && + task->ata_task.fis.command == ATA_CMD_PACKET) { + state = SCI_REQ_ATAPI_WAIT_H2D; + } else if (task->data_dir == DMA_NONE) { state = SCI_REQ_STP_NON_DATA_WAIT_H2D; - else if (task->ata_task.dma_xfer) + } else if (task->ata_task.dma_xfer) { state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; - else /* PIO */ + } else /* PIO */ { state = SCI_REQ_STP_PIO_WAIT_H2D; - - sci_change_state(sm, state); + } + } else { + /* SSP or NCQ are fully accelerated, no substates */ + return; } + sci_change_state(sm, state); } static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) @@ -2913,6 +3201,10 @@ static const struct sci_base_state sci_request_state_table[] = { [SCI_REQ_TASK_WAIT_TC_RESP] = { }, [SCI_REQ_SMP_WAIT_RESP] = { }, [SCI_REQ_SMP_WAIT_TC_COMP] = { }, + [SCI_REQ_ATAPI_WAIT_H2D] = { }, + [SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { }, + [SCI_REQ_ATAPI_WAIT_D2H] = { }, + [SCI_REQ_ATAPI_WAIT_TC_COMP] = { }, [SCI_REQ_COMPLETED] = { .enter_state = sci_request_completed_state_enter, }, diff --git a/drivers/scsi/isci/request.h b/drivers/scsi/isci/request.h index 7a1d5a9778eb..f720b97b7bb5 100644 --- a/drivers/scsi/isci/request.h +++ b/drivers/scsi/isci/request.h @@ -96,7 +96,6 @@ enum sci_request_protocol { * to wait for another fis or if the transfer is complete. Upon * receipt of a d2h fis this will be the status field of that fis. 
* @sgl - track pio transfer progress as we iterate through the sgl
- * @device_cdb_len - atapi device advertises it's transfer constraints at setup
 */
 struct isci_stp_request {
 	u32 pio_len;
@@ -107,7 +106,6 @@ struct isci_stp_request {
 		u8 set;
 		u32 offset;
 	} sgl;
-	u32 device_cdb_len;
 };
 
 struct isci_request {
@@ -174,9 +172,6 @@ struct isci_request {
 			};
 		} ssp;
 		struct {
-			struct smp_resp rsp;
-		} smp;
-		struct {
 			struct isci_stp_request req;
 			struct host_to_dev_fis cmd;
 			struct dev_to_host_fis rsp;
@@ -252,6 +247,32 @@ enum sci_base_request_states {
 	SCI_REQ_STP_PIO_DATA_OUT,
 
 	/*
+	 * While in this state the IO request object is waiting for the TC
+	 * completion notification for the H2D Register FIS
+	 */
+	SCI_REQ_ATAPI_WAIT_H2D,
+
+	/*
+	 * While in this state the IO request object is waiting for a
+	 * PIO Setup.
+	 */
+	SCI_REQ_ATAPI_WAIT_PIO_SETUP,
+
+	/*
+	 * A non-data IO transitions to this state after receiving a TC
+	 * completion. While in this state the IO request object is waiting
+	 * for the D2H status frame as an UF.
+	 */
+	SCI_REQ_ATAPI_WAIT_D2H,
+
+	/*
+	 * When transmitting raw frames hardware reports task context completion
+	 * after every frame submission, so in the non-accelerated case we need
+	 * to expect the completion for the "cdb" frame.
+	 */
+	SCI_REQ_ATAPI_WAIT_TC_COMP,
+
+	/*
 	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
 	 * task management request is waiting for the transmission of the
 	 * initial frame (i.e. command, task, etc.).
diff --git a/drivers/scsi/isci/sas.h b/drivers/scsi/isci/sas.h
index 462b15174d3f..dc26b4aea99e 100644
--- a/drivers/scsi/isci/sas.h
+++ b/drivers/scsi/isci/sas.h
@@ -204,8 +204,6 @@ struct smp_req {
 	u8 req_data[0];
 } __packed;
 
-#define SMP_RESP_HDR_SZ 4
-
 /*
  * struct sci_sas_address - This structure depicts how a SAS address is
  * represented by SCI.
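To make the new SCI_REQ_ATAPI_* flow concrete: the request enters SCI_REQ_ATAPI_WAIT_H2D after sci_atapi_construct() (shown earlier) emits an H2D Register FIS for the PACKET command with the DMA feature bit forced on. A hedged standalone C sketch of that FIS setup follows; the struct and constants are simplified stand-ins for the kernel's host_to_dev_fis and ATA definitions, though the numeric values (0x27, 0xa0, bit 0) match the usual ATA/libata conventions.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define DEMO_FIS_REGH2D		0x27	/* host-to-device register FIS */
#define DEMO_ATA_CMD_PACKET	0xa0	/* ATAPI PACKET command */
#define DEMO_ATAPI_PKT_DMA	(1 << 0) /* features bit 0: DMA transfer */

struct demo_h2d_fis {
	uint8_t fis_type;
	uint8_t flags;
	uint8_t command;
	uint8_t features;
};

int main(void)
{
	struct demo_h2d_fis fis;

	memset(&fis, 0, sizeof(fis));
	fis.fis_type = DEMO_FIS_REGH2D;
	fis.flags = 1 << 7;			/* C bit: command update */
	fis.command = DEMO_ATA_CMD_PACKET;
	fis.features |= DEMO_ATAPI_PKT_DMA;	/* promote to DMA protocol */

	printf("PACKET cmd 0x%02x, features 0x%02x (DMA)\n",
	       fis.command, fis.features);
	return 0;
}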
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index d6bcdd013dc9..e2d9418683ce 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -1345,29 +1345,6 @@ static void isci_smp_task_done(struct sas_task *task)
 	complete(&task->completion);
 }
 
-static struct sas_task *isci_alloc_task(void)
-{
-	struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL);
-
-	if (task) {
-		INIT_LIST_HEAD(&task->list);
-		spin_lock_init(&task->task_state_lock);
-		task->task_state_flags = SAS_TASK_STATE_PENDING;
-		init_timer(&task->timer);
-		init_completion(&task->completion);
-	}
-
-	return task;
-}
-
-static void isci_free_task(struct isci_host *ihost, struct sas_task *task)
-{
-	if (task) {
-		BUG_ON(!list_empty(&task->list));
-		kfree(task);
-	}
-}
-
 static int isci_smp_execute_task(struct isci_host *ihost,
 				 struct domain_device *dev, void *req,
 				 int req_size, void *resp, int resp_size)
@@ -1376,7 +1353,7 @@ static int isci_smp_execute_task(struct isci_host *ihost,
 	struct sas_task *task = NULL;
 
 	for (retry = 0; retry < 3; retry++) {
-		task = isci_alloc_task();
+		task = sas_alloc_task(GFP_KERNEL);
 		if (!task)
 			return -ENOMEM;
@@ -1439,13 +1416,13 @@ static int isci_smp_execute_task(struct isci_host *ihost,
 				    SAS_ADDR(dev->sas_addr),
 				    task->task_status.resp,
 				    task->task_status.stat);
-			isci_free_task(ihost, task);
+			sas_free_task(task);
 			task = NULL;
 		}
 	}
 ex_err:
 	BUG_ON(retry == 3 && task != NULL);
-	isci_free_task(ihost, task);
+	sas_free_task(task);
 	return res;
 }
diff --git a/drivers/scsi/isci/task.h b/drivers/scsi/isci/task.h
index 4a7fa90287ef..15b18d158993 100644
--- a/drivers/scsi/isci/task.h
+++ b/drivers/scsi/isci/task.h
@@ -286,6 +286,25 @@ isci_task_set_completion_status(
 	task->task_status.resp = response;
 	task->task_status.stat = status;
 
+	switch (task->task_proto) {
+
+	case SAS_PROTOCOL_SATA:
+	case SAS_PROTOCOL_STP:
+	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+
+		if (task_notification_selection
+		    == isci_perform_error_io_completion) {
+			/* SATA/STP I/O has its own means of scheduling device
+			 * error handling on the normal path.
+ */ + task_notification_selection + = isci_perform_normal_io_completion; + } + break; + default: + break; + } + switch (task_notification_selection) { case isci_perform_error_io_completion: diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 7724414588fa..23e706673d06 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -872,6 +872,61 @@ static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session) iscsi_host_free(shost); } +static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param) +{ + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_NETDEV_NAME: + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_HDRDGST_EN: + case ISCSI_PARAM_DATADGST_EN: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_EXP_STATSN: + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_PING_TMO: + case ISCSI_PARAM_RECV_TMO: + case ISCSI_PARAM_INITIAL_R2T_EN: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_IMM_DATA_EN: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_PDU_INORDER_EN: + case ISCSI_PARAM_DATASEQ_INORDER_EN: + case ISCSI_PARAM_ERL: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_USERNAME: + case ISCSI_PARAM_PASSWORD: + case ISCSI_PARAM_USERNAME_IN: + case ISCSI_PARAM_PASSWORD_IN: + case ISCSI_PARAM_FAST_ABORT: + case ISCSI_PARAM_ABORT_TMO: + case ISCSI_PARAM_LU_RESET_TMO: + case ISCSI_PARAM_TGT_RESET_TMO: + case ISCSI_PARAM_IFACE_NAME: + case ISCSI_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + } + + return 0; +} + static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev) { set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags); @@ -910,33 +965,6 @@ static struct iscsi_transport iscsi_sw_tcp_transport = { .name = "tcp", .caps = CAP_RECOVERY_L0 | CAP_MULTI_R2T | CAP_HDRDGST | CAP_DATADGST, - .param_mask = ISCSI_MAX_RECV_DLENGTH | - ISCSI_MAX_XMIT_DLENGTH | - ISCSI_HDRDGST_EN | - ISCSI_DATADGST_EN | - ISCSI_INITIAL_R2T_EN | - ISCSI_MAX_R2T | - ISCSI_IMM_DATA_EN | - ISCSI_FIRST_BURST | - ISCSI_MAX_BURST | - ISCSI_PDU_INORDER_EN | - ISCSI_DATASEQ_INORDER_EN | - ISCSI_ERL | - ISCSI_CONN_PORT | - ISCSI_CONN_ADDRESS | - ISCSI_EXP_STATSN | - ISCSI_PERSISTENT_PORT | - ISCSI_PERSISTENT_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT | - ISCSI_USERNAME | ISCSI_PASSWORD | - ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN | - ISCSI_FAST_ABORT | ISCSI_ABORT_TMO | - ISCSI_LU_RESET_TMO | ISCSI_TGT_RESET_TMO | - ISCSI_PING_TMO | ISCSI_RECV_TMO | - ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME, - .host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS | - ISCSI_HOST_INITIATOR_NAME | - ISCSI_HOST_NETDEV_NAME, /* session management */ .create_session = iscsi_sw_tcp_session_create, .destroy_session = iscsi_sw_tcp_session_destroy, @@ -944,6 +972,7 @@ static struct iscsi_transport iscsi_sw_tcp_transport = { .create_conn = iscsi_sw_tcp_conn_create, .bind_conn = iscsi_sw_tcp_conn_bind, .destroy_conn = iscsi_sw_tcp_conn_destroy, + .attr_is_visible = iscsi_sw_tcp_attr_is_visible, .set_param = iscsi_sw_tcp_conn_set_param, .get_conn_param = iscsi_sw_tcp_conn_get_param, .get_session_param = iscsi_session_get_param, diff --git 
a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index d261e982a2fa..7c055fdca45d 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -65,16 +65,15 @@ static struct workqueue_struct *fc_exch_workqueue; * assigned range of exchanges to per cpu pool. */ struct fc_exch_pool { + spinlock_t lock; + struct list_head ex_list; u16 next_index; u16 total_exches; /* two cache of free slot in exch array */ u16 left; u16 right; - - spinlock_t lock; - struct list_head ex_list; -}; +} ____cacheline_aligned_in_smp; /** * struct fc_exch_mgr - The Exchange Manager (EM). @@ -91,13 +90,13 @@ struct fc_exch_pool { * It manages the allocation of exchange IDs. */ struct fc_exch_mgr { + struct fc_exch_pool *pool; + mempool_t *ep_pool; enum fc_class class; struct kref kref; u16 min_xid; u16 max_xid; - mempool_t *ep_pool; u16 pool_max_index; - struct fc_exch_pool *pool; /* * currently exchange mgr stats are updated but not used. diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 4c41ee816f0b..221875ec3d7c 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -759,7 +759,6 @@ static void fc_fcp_recv(struct fc_seq *seq, struct fc_frame *fp, void *arg) goto out; if (fc_fcp_lock_pkt(fsp)) goto out; - fsp->last_pkt_time = jiffies; if (fh->fh_type == FC_TYPE_BLS) { fc_fcp_abts_resp(fsp, fp); @@ -1148,7 +1147,6 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, rc = -1; goto unlock; } - fsp->last_pkt_time = jiffies; fsp->seq_ptr = seq; fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index 256a999d010b..d7c76f2eb636 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -3163,7 +3163,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, { struct iscsi_conn *conn = cls_conn->dd_data; struct iscsi_session *session = conn->session; - uint32_t value; switch(param) { case ISCSI_PARAM_FAST_ABORT: @@ -3220,14 +3219,6 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn, case ISCSI_PARAM_ERL: sscanf(buf, "%d", &session->erl); break; - case ISCSI_PARAM_IFMARKER_EN: - sscanf(buf, "%d", &value); - BUG_ON(value); - break; - case ISCSI_PARAM_OFMARKER_EN: - sscanf(buf, "%d", &value); - BUG_ON(value); - break; case ISCSI_PARAM_EXP_STATSN: sscanf(buf, "%u", &conn->exp_statsn); break; diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index f5831930df9b..54a5199ceb56 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -219,17 +219,20 @@ out_err2: /* ---------- Device registration and unregistration ---------- */ -static inline void sas_unregister_common_dev(struct domain_device *dev) +static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev) { sas_notify_lldd_dev_gone(dev); if (!dev->parent) dev->port->port_dev = NULL; else list_del_init(&dev->siblings); + + spin_lock_irq(&port->dev_list_lock); list_del_init(&dev->dev_list_node); + spin_unlock_irq(&port->dev_list_lock); } -void sas_unregister_dev(struct domain_device *dev) +void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev) { if (dev->rphy) { sas_remove_children(&dev->rphy->dev); @@ -241,15 +244,15 @@ void sas_unregister_dev(struct domain_device *dev) kfree(dev->ex_dev.ex_phy); dev->ex_dev.ex_phy = NULL; } - sas_unregister_common_dev(dev); + sas_unregister_common_dev(port, dev); } void sas_unregister_domain_devices(struct asd_sas_port *port) { struct 
domain_device *dev, *n; - list_for_each_entry_safe_reverse(dev,n,&port->dev_list,dev_list_node) - sas_unregister_dev(dev); + list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) + sas_unregister_dev(port, dev); port->port->rphy = NULL; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 16ad97df5ba6..1b831c55ec6e 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -199,6 +199,8 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, phy->virtual = dr->virtual; phy->last_da_index = -1; + phy->phy->identify.sas_address = SAS_ADDR(phy->attached_sas_addr); + phy->phy->identify.device_type = phy->attached_dev_type; phy->phy->identify.initiator_port_protocols = phy->attached_iproto; phy->phy->identify.target_port_protocols = phy->attached_tproto; phy->phy->identify.phy_identifier = phy_id; @@ -329,6 +331,7 @@ static void ex_assign_report_general(struct domain_device *dev, dev->ex_dev.ex_change_count = be16_to_cpu(rg->change_count); dev->ex_dev.max_route_indexes = be16_to_cpu(rg->route_indexes); dev->ex_dev.num_phys = min(rg->num_phys, (u8)MAX_EXPANDER_PHYS); + dev->ex_dev.t2t_supp = rg->t2t_supp; dev->ex_dev.conf_route_table = rg->conf_route_table; dev->ex_dev.configuring = rg->configuring; memcpy(dev->ex_dev.enclosure_logical_id, rg->enclosure_logical_id, 8); @@ -751,7 +754,10 @@ static struct domain_device *sas_ex_discover_end_dev( out_list_del: sas_rphy_free(child->rphy); child->rphy = NULL; + + spin_lock_irq(&parent->port->dev_list_lock); list_del(&child->dev_list_node); + spin_unlock_irq(&parent->port->dev_list_lock); out_free: sas_port_delete(phy->port); out_err: @@ -1133,15 +1139,17 @@ static void sas_print_parent_topology_bug(struct domain_device *child, }; struct domain_device *parent = child->parent; - sas_printk("%s ex %016llx phy 0x%x <--> %s ex %016llx phy 0x%x " - "has %c:%c routing link!\n", + sas_printk("%s ex %016llx (T2T supp:%d) phy 0x%x <--> %s ex %016llx " + "(T2T supp:%d) phy 0x%x has %c:%c routing link!\n", ex_type[parent->dev_type], SAS_ADDR(parent->sas_addr), + parent->ex_dev.t2t_supp, parent_phy->phy_id, ex_type[child->dev_type], SAS_ADDR(child->sas_addr), + child->ex_dev.t2t_supp, child_phy->phy_id, ra_char[parent_phy->routing_attr], @@ -1238,10 +1246,15 @@ static int sas_check_parent_topology(struct domain_device *child) sas_print_parent_topology_bug(child, parent_phy, child_phy); res = -ENODEV; } - } else if (parent_phy->routing_attr == TABLE_ROUTING && - child_phy->routing_attr != SUBTRACTIVE_ROUTING) { - sas_print_parent_topology_bug(child, parent_phy, child_phy); - res = -ENODEV; + } else if (parent_phy->routing_attr == TABLE_ROUTING) { + if (child_phy->routing_attr == SUBTRACTIVE_ROUTING || + (child_phy->routing_attr == TABLE_ROUTING && + child_ex->t2t_supp && parent_ex->t2t_supp)) { + /* All good */; + } else { + sas_print_parent_topology_bug(child, parent_phy, child_phy); + res = -ENODEV; + } } break; case FANOUT_DEV: @@ -1729,7 +1742,7 @@ out: return res; } -static void sas_unregister_ex_tree(struct domain_device *dev) +static void sas_unregister_ex_tree(struct asd_sas_port *port, struct domain_device *dev) { struct expander_device *ex = &dev->ex_dev; struct domain_device *child, *n; @@ -1738,11 +1751,11 @@ static void sas_unregister_ex_tree(struct domain_device *dev) child->gone = 1; if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) - sas_unregister_ex_tree(child); + sas_unregister_ex_tree(port, child); else - 
sas_unregister_dev(child); + sas_unregister_dev(port, child); } - sas_unregister_dev(dev); + sas_unregister_dev(port, dev); } static void sas_unregister_devs_sas_addr(struct domain_device *parent, @@ -1759,9 +1772,9 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, child->gone = 1; if (child->dev_type == EDGE_DEV || child->dev_type == FANOUT_DEV) - sas_unregister_ex_tree(child); + sas_unregister_ex_tree(parent->port, child); else - sas_unregister_dev(child); + sas_unregister_dev(parent->port, child); break; } } diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c index 04ad8dd1a74c..e1aa17840c5b 100644 --- a/drivers/scsi/libsas/sas_host_smp.c +++ b/drivers/scsi/libsas/sas_host_smp.c @@ -51,6 +51,91 @@ static void sas_host_smp_discover(struct sas_ha_struct *sas_ha, u8 *resp_data, resp_data[15] = rphy->identify.target_port_protocols; } +/** + * to_sas_gpio_gp_bit - given the gpio frame data find the byte/bit position of 'od' + * @od: od bit to find + * @data: incoming bitstream (from frame) + * @index: requested data register index (from frame) + * @count: total number of registers in the bitstream (from frame) + * @bit: bit position of 'od' in the returned byte + * + * returns NULL if 'od' is not in 'data' + * + * From SFF-8485 v0.7: + * "In GPIO_TX[1], bit 0 of byte 3 contains the first bit (i.e., OD0.0) + * and bit 7 of byte 0 contains the 32nd bit (i.e., OD10.1). + * + * In GPIO_TX[2], bit 0 of byte 3 contains the 33rd bit (i.e., OD10.2) + * and bit 7 of byte 0 contains the 64th bit (i.e., OD21.0)." + * + * The general-purpose (raw-bitstream) RX registers have the same layout + * although 'od' is renamed 'id' for 'input data'. + * + * SFF-8489 defines the behavior of the LEDs in response to the 'od' values. 
+ */ +static u8 *to_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count, u8 *bit) +{ + unsigned int reg; + u8 byte; + + /* gp registers start at index 1 */ + if (index == 0) + return NULL; + + index--; /* make index 0-based */ + if (od < index * 32) + return NULL; + + od -= index * 32; + reg = od >> 5; + + if (reg >= count) + return NULL; + + od &= (1 << 5) - 1; + byte = 3 - (od >> 3); + *bit = od & ((1 << 3) - 1); + + return &data[reg * 4 + byte]; +} + +int try_test_sas_gpio_gp_bit(unsigned int od, u8 *data, u8 index, u8 count) +{ + u8 *byte; + u8 bit; + + byte = to_sas_gpio_gp_bit(od, data, index, count, &bit); + if (!byte) + return -1; + + return (*byte >> bit) & 1; +} +EXPORT_SYMBOL(try_test_sas_gpio_gp_bit); + +static int sas_host_smp_write_gpio(struct sas_ha_struct *sas_ha, u8 *resp_data, + u8 reg_type, u8 reg_index, u8 reg_count, + u8 *req_data) +{ + struct sas_internal *i = to_sas_internal(sas_ha->core.shost->transportt); + int written; + + if (i->dft->lldd_write_gpio == NULL) { + resp_data[2] = SMP_RESP_FUNC_UNK; + return 0; + } + + written = i->dft->lldd_write_gpio(sas_ha, reg_type, reg_index, + reg_count, req_data); + + if (written < 0) { + resp_data[2] = SMP_RESP_FUNC_FAILED; + written = 0; + } else + resp_data[2] = SMP_RESP_FUNC_ACC; + + return written; +} + static void sas_report_phy_sata(struct sas_ha_struct *sas_ha, u8 *resp_data, u8 phy_id) { @@ -230,9 +315,23 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, /* Can't implement; hosts have no routes */ break; - case SMP_WRITE_GPIO_REG: - /* FIXME: need GPIO support in the transport class */ + case SMP_WRITE_GPIO_REG: { + /* SFF-8485 v0.7 */ + const int base_frame_size = 11; + int to_write = req_data[4]; + + if (blk_rq_bytes(req) < base_frame_size + to_write * 4 || + req->resid_len < base_frame_size + to_write * 4) { + resp_data[2] = SMP_RESP_INV_FRM_LEN; + break; + } + + to_write = sas_host_smp_write_gpio(sas_ha, resp_data, req_data[2], + req_data[3], to_write, &req_data[8]); + req->resid_len -= base_frame_size + to_write * 4; + rsp->resid_len -= 8; break; + } case SMP_CONF_ROUTE_INFO: /* Can't implement; hosts have no routes */ diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 2dc55343f671..d81c3b1989f7 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -37,7 +37,32 @@ #include "../scsi_sas_internal.h" -struct kmem_cache *sas_task_cache; +static struct kmem_cache *sas_task_cache; + +struct sas_task *sas_alloc_task(gfp_t flags) +{ + struct sas_task *task = kmem_cache_zalloc(sas_task_cache, flags); + + if (task) { + INIT_LIST_HEAD(&task->list); + spin_lock_init(&task->task_state_lock); + task->task_state_flags = SAS_TASK_STATE_PENDING; + init_timer(&task->timer); + init_completion(&task->completion); + } + + return task; +} +EXPORT_SYMBOL_GPL(sas_alloc_task); + +void sas_free_task(struct sas_task *task) +{ + if (task) { + BUG_ON(!list_empty(&task->list)); + kmem_cache_free(sas_task_cache, task); + } +} +EXPORT_SYMBOL_GPL(sas_free_task); /*------------ SAS addr hash -----------*/ void sas_hash_addr(u8 *hashed, const u8 *sas_addr) @@ -152,10 +177,15 @@ int sas_unregister_ha(struct sas_ha_struct *sas_ha) static int sas_get_linkerrors(struct sas_phy *phy) { - if (scsi_is_sas_phy_local(phy)) - /* FIXME: we have no local phy stats - * gathering at this time */ - return -EINVAL; + if (scsi_is_sas_phy_local(phy)) { + struct Scsi_Host *shost = dev_to_shost(phy->dev.parent); + struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); + 
struct asd_sas_phy *asd_phy = sas_ha->sas_phy[phy->number]; + struct sas_internal *i = + to_sas_internal(sas_ha->core.shost->transportt); + + return i->dft->lldd_control_phy(asd_phy, PHY_FUNC_GET_EVENTS, NULL); + } return sas_smp_get_phy_events(phy); } @@ -293,8 +323,7 @@ EXPORT_SYMBOL_GPL(sas_domain_release_transport); static int __init sas_class_init(void) { - sas_task_cache = kmem_cache_create("sas_task", sizeof(struct sas_task), - 0, SLAB_HWCACHE_ALIGN, NULL); + sas_task_cache = KMEM_CACHE(sas_task, SLAB_HWCACHE_ALIGN); if (!sas_task_cache) return -ENOMEM; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index eeba76cdf774..b2c4a7731656 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -182,79 +182,56 @@ int sas_queue_up(struct sas_task *task) return 0; } -/** - * sas_queuecommand -- Enqueue a command for processing - * @parameters: See SCSI Core documentation - * - * Note: XXX: Remove the host unlock/lock pair when SCSI Core can - * call us without holding an IRQ spinlock... - */ -static int sas_queuecommand_lck(struct scsi_cmnd *cmd, - void (*scsi_done)(struct scsi_cmnd *)) - __releases(host->host_lock) - __acquires(dev->sata_dev.ap->lock) - __releases(dev->sata_dev.ap->lock) - __acquires(host->host_lock) +int sas_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) { - int res = 0; - struct domain_device *dev = cmd_to_domain_dev(cmd); - struct Scsi_Host *host = cmd->device->host; struct sas_internal *i = to_sas_internal(host->transportt); + struct domain_device *dev = cmd_to_domain_dev(cmd); + struct sas_ha_struct *sas_ha = dev->port->ha; + struct sas_task *task; + int res = 0; - spin_unlock_irq(host->host_lock); + /* If the device fell off, no sense in issuing commands */ + if (dev->gone) { + cmd->result = DID_BAD_TARGET << 16; + goto out_done; + } - { - struct sas_ha_struct *sas_ha = dev->port->ha; - struct sas_task *task; - - /* If the device fell off, no sense in issuing commands */ - if (dev->gone) { - cmd->result = DID_BAD_TARGET << 16; - scsi_done(cmd); - goto out; - } + if (dev_is_sata(dev)) { + unsigned long flags; - if (dev_is_sata(dev)) { - unsigned long flags; + spin_lock_irqsave(dev->sata_dev.ap->lock, flags); + res = ata_sas_queuecmd(cmd, dev->sata_dev.ap); + spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); + return res; + } - spin_lock_irqsave(dev->sata_dev.ap->lock, flags); - res = ata_sas_queuecmd(cmd, dev->sata_dev.ap); - spin_unlock_irqrestore(dev->sata_dev.ap->lock, flags); - goto out; - } + task = sas_create_task(cmd, dev, GFP_ATOMIC); + if (!task) + return SCSI_MLQUEUE_HOST_BUSY; - res = -ENOMEM; - task = sas_create_task(cmd, dev, GFP_ATOMIC); - if (!task) - goto out; + /* Queue up, Direct Mode or Task Collector Mode. */ + if (sas_ha->lldd_max_execute_num < 2) + res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); + else + res = sas_queue_up(task); - cmd->scsi_done = scsi_done; - /* Queue up, Direct Mode or Task Collector Mode. 
*/ - if (sas_ha->lldd_max_execute_num < 2) - res = i->dft->lldd_execute_task(task, 1, GFP_ATOMIC); - else - res = sas_queue_up(task); + if (res) + goto out_free_task; + return 0; - /* Examine */ - if (res) { - SAS_DPRINTK("lldd_execute_task returned: %d\n", res); - ASSIGN_SAS_TASK(cmd, NULL); - sas_free_task(task); - if (res == -SAS_QUEUE_FULL) { - cmd->result = DID_SOFT_ERROR << 16; /* retry */ - res = 0; - scsi_done(cmd); - } - goto out; - } - } -out: - spin_lock_irq(host->host_lock); - return res; +out_free_task: + SAS_DPRINTK("lldd_execute_task returned: %d\n", res); + ASSIGN_SAS_TASK(cmd, NULL); + sas_free_task(task); + if (res == -SAS_QUEUE_FULL) + cmd->result = DID_SOFT_ERROR << 16; /* retry */ + else + cmd->result = DID_ERROR << 16; +out_done: + cmd->scsi_done(cmd); + return 0; } -DEF_SCSI_QCMD(sas_queuecommand) - static void sas_eh_finish_cmd(struct scsi_cmnd *cmd) { struct sas_task *task = TO_SAS_TASK(cmd); @@ -784,8 +761,7 @@ int sas_target_alloc(struct scsi_target *starget) return 0; } -#define SAS_DEF_QD 32 -#define SAS_MAX_QD 64 +#define SAS_DEF_QD 256 int sas_slave_configure(struct scsi_device *scsi_dev) { @@ -825,34 +801,41 @@ void sas_slave_destroy(struct scsi_device *scsi_dev) struct domain_device *dev = sdev_to_domain_dev(scsi_dev); if (dev_is_sata(dev)) - dev->sata_dev.ap->link.device[0].class = ATA_DEV_NONE; + sas_to_ata_dev(dev)->class = ATA_DEV_NONE; } -int sas_change_queue_depth(struct scsi_device *scsi_dev, int new_depth, - int reason) +int sas_change_queue_depth(struct scsi_device *sdev, int depth, int reason) { - int res = min(new_depth, SAS_MAX_QD); + struct domain_device *dev = sdev_to_domain_dev(sdev); - if (reason != SCSI_QDEPTH_DEFAULT) + if (dev_is_sata(dev)) + return __ata_change_queue_depth(dev->sata_dev.ap, sdev, depth, + reason); + + switch (reason) { + case SCSI_QDEPTH_DEFAULT: + case SCSI_QDEPTH_RAMP_UP: + if (!sdev->tagged_supported) + depth = 1; + scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth); + break; + case SCSI_QDEPTH_QFULL: + scsi_track_queue_full(sdev, depth); + break; + default: return -EOPNOTSUPP; - - if (scsi_dev->tagged_supported) - scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), - res); - else { - struct domain_device *dev = sdev_to_domain_dev(scsi_dev); - sas_printk("device %llx LUN %x queue depth changed to 1\n", - SAS_ADDR(dev->sas_addr), - scsi_dev->lun); - scsi_adjust_queue_depth(scsi_dev, 0, 1); - res = 1; } - return res; + return depth; } int sas_change_queue_type(struct scsi_device *scsi_dev, int qt) { + struct domain_device *dev = sdev_to_domain_dev(scsi_dev); + + if (dev_is_sata(dev)) + return -EINVAL; + if (!scsi_dev->tagged_supported) return 0; diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index c088a36d1f33..bb4c8e0584e2 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h @@ -846,8 +846,24 @@ struct lpfc_hba { struct dentry *debug_hbqinfo; struct dentry *debug_dumpHostSlim; struct dentry *debug_dumpHBASlim; - struct dentry *debug_dumpData; /* BlockGuard BPL*/ - struct dentry *debug_dumpDif; /* BlockGuard BPL*/ + struct dentry *debug_dumpData; /* BlockGuard BPL */ + struct dentry *debug_dumpDif; /* BlockGuard BPL */ + struct dentry *debug_InjErrLBA; /* LBA to inject errors at */ + struct dentry *debug_writeGuard; /* inject write guard_tag errors */ + struct dentry *debug_writeApp; /* inject write app_tag errors */ + struct dentry *debug_writeRef; /* inject write ref_tag errors */ + struct dentry *debug_readApp; /* inject read app_tag errors */ + struct dentry 
*debug_readRef;	/* inject read ref_tag errors */
+
+	/* T10 DIF error injection */
+	uint32_t lpfc_injerr_wgrd_cnt;
+	uint32_t lpfc_injerr_wapp_cnt;
+	uint32_t lpfc_injerr_wref_cnt;
+	uint32_t lpfc_injerr_rapp_cnt;
+	uint32_t lpfc_injerr_rref_cnt;
+	sector_t lpfc_injerr_lba;
+#define LPFC_INJERR_LBA_OFF	(sector_t)0xffffffffffffffff
+
 	struct dentry *debug_slow_ring_trc;
 	struct lpfc_debugfs_trc *slow_ring_trc;
 	atomic_t slow_ring_trc_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 2542f1f8bf86..4b0333ee2d94 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -52,6 +52,13 @@
 #define LPFC_MIN_DEVLOSS_TMO	1
 #define LPFC_MAX_DEVLOSS_TMO	255
 
+/*
+ * Write key size should be multiple of 4. If write key is changed
+ * make sure that library write key is also changed.
+ */
+#define LPFC_REG_WRITE_KEY_SIZE	4
+#define LPFC_REG_WRITE_KEY	"EMLX"
+
 /**
  * lpfc_jedec_to_ascii - Hex to ascii convertor according to JEDEC rules
  * @incr: integer to convert.
@@ -693,7 +700,7 @@ lpfc_selective_reset(struct lpfc_hba *phba)
 	int rc;
 
 	if (!phba->cfg_enable_hba_reset)
-		return -EIO;
+		return -EACCES;
 
 	status = lpfc_do_offline(phba, LPFC_EVT_OFFLINE);
 
@@ -742,9 +749,11 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
 	struct lpfc_hba *phba = vport->phba;
-	int status = -EINVAL;
+	if (!phba->cfg_enable_hba_reset)
+		return -EACCES;
+
 	if (strncmp(buf, "selective", sizeof("selective") - 1) == 0)
 		status = phba->lpfc_selective_reset(phba);
 
@@ -765,16 +774,21 @@ lpfc_issue_reset(struct device *dev, struct device_attribute *attr,
  * Returns:
  * zero for success
 **/
-static int
+int
 lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *phba)
 {
-	struct lpfc_register portstat_reg;
+	struct lpfc_register portstat_reg = {0};
 	int i;
-
+	msleep(100);
 	lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
 		   &portstat_reg.word0);
 
+	/* verify if privileged for the requested operation */
+	if (!bf_get(lpfc_sliport_status_rn, &portstat_reg) &&
+	    !bf_get(lpfc_sliport_status_err, &portstat_reg))
+		return -EPERM;
+
 	/* wait for the SLI port firmware ready after firmware reset */
 	for (i = 0; i < LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT; i++) {
 		msleep(10);
@@ -816,16 +830,13 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 	int rc;
 
 	if (!phba->cfg_enable_hba_reset)
-		return -EIO;
+		return -EACCES;
 
 	if ((phba->sli_rev < LPFC_SLI_REV4) ||
 	    (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
 	     LPFC_SLI_INTF_IF_TYPE_2))
 		return -EPERM;
 
-	if (!pdev->is_physfn)
-		return -EPERM;
-
 	/* Disable SR-IOV virtual functions if enabled */
 	if (phba->cfg_sriov_nr_virtfn) {
 		pci_disable_sriov(pdev);
@@ -858,7 +869,7 @@ lpfc_sli4_pdev_reg_request(struct lpfc_hba *phba, uint32_t opcode)
 	rc = lpfc_sli4_pdev_status_reg_wait(phba);
 
 	if (rc)
-		return -EIO;
+		return rc;
 
 	init_completion(&online_compl);
 	rc = lpfc_workq_post_event(phba, &status, &online_compl,
@@ -984,7 +995,7 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
 	if (!status)
 		return strlen(buf);
 	else
-		return -EIO;
+		return status;
 }
 
 /**
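The LPFC_REG_WRITE_KEY defined above is consumed by the sysfs_ctlreg_write() hunk just below: a write must begin with the 4-byte "EMLX" key, which is then stripped before the payload reaches the registers. A hedged userspace C sketch of that validation follows; the demo_* names are ours, and error codes come from errno.h rather than the kernel tree.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define DEMO_KEY	"EMLX"
#define DEMO_KEY_SIZE	4

/* validate the key prefix, return the number of payload bytes past it */
static ssize_t demo_ctlreg_write(const char *buf, size_t count)
{
	if (count <= DEMO_KEY_SIZE)
		return 0;			/* nothing beyond the key */
	if (memcmp(buf, DEMO_KEY, DEMO_KEY_SIZE))
		return -EINVAL;			/* key mismatch: reject */
	return (ssize_t)(count - DEMO_KEY_SIZE);/* payload starts after key */
}

int main(void)
{
	char good[8] = "EMLX\x01\x02\x03\x04";

	printf("payload bytes: %zd\n", demo_ctlreg_write(good, sizeof(good)));
	printf("bad key: %zd\n", demo_ctlreg_write("XXXX\x01\x02\x03\x04", 8));
	return 0;
}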
HBA registers from accidental writes. */ + if (memcmp(buf, LPFC_REG_WRITE_KEY, LPFC_REG_WRITE_KEY_SIZE)) + return -EINVAL; + + if (!(vport->fc_flag & FC_OFFLINE_MODE)) return -EPERM; - } spin_lock_irq(&phba->hbalock); - for (buf_off = 0; buf_off < count; buf_off += sizeof(uint32_t)) - writel(*((uint32_t *)(buf + buf_off)), + for (buf_off = 0; buf_off < count - LPFC_REG_WRITE_KEY_SIZE; + buf_off += sizeof(uint32_t)) + writel(*((uint32_t *)(buf + buf_off + LPFC_REG_WRITE_KEY_SIZE)), phba->ctrl_regs_memmap_p + off + buf_off); spin_unlock_irq(&phba->hbalock); @@ -4097,8 +4113,10 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj, struct Scsi_Host *shost = class_to_shost(dev); struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata; struct lpfc_hba *phba = vport->phba; - int rc; + LPFC_MBOXQ_t *mboxq; MAILBOX_t *pmb; + uint32_t mbox_tmo; + int rc; if (off > MAILBOX_CMD_SIZE) return -ERANGE; @@ -4123,7 +4141,8 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj, if (off == 0 && phba->sysfs_mbox.state == SMBOX_WRITING && phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) { - pmb = &phba->sysfs_mbox.mbox->u.mb; + mboxq = (LPFC_MBOXQ_t *)&phba->sysfs_mbox.mbox; + pmb = &mboxq->u.mb; switch (pmb->mbxCommand) { /* Offline only */ case MBX_INIT_LINK: @@ -4233,9 +4252,8 @@ sysfs_mbox_read(struct file *filp, struct kobject *kobj, } else { spin_unlock_irq(&phba->hbalock); - rc = lpfc_sli_issue_mbox_wait (phba, - phba->sysfs_mbox.mbox, - lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ); + mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); spin_lock_irq(&phba->hbalock); } @@ -4480,9 +4498,10 @@ lpfc_get_host_fabric_name (struct Scsi_Host *shost) spin_lock_irq(shost->host_lock); - if ((vport->fc_flag & FC_FABRIC) || - ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && - (vport->fc_flag & FC_PUBLIC_LOOP))) + if ((vport->port_state > LPFC_FLOGI) && + ((vport->fc_flag & FC_FABRIC) || + ((phba->fc_topology == LPFC_TOPOLOGY_LOOP) && + (vport->fc_flag & FC_PUBLIC_LOOP)))) node_name = wwn_to_u64(phba->fc_fabparam.nodeName.u.wwn); else /* fabric is local port if there is no F/FL_Port */ @@ -4555,9 +4574,17 @@ lpfc_get_stats(struct Scsi_Host *shost) memset(hs, 0, sizeof (struct fc_host_statistics)); hs->tx_frames = pmb->un.varRdStatus.xmitFrameCnt; - hs->tx_words = (pmb->un.varRdStatus.xmitByteCnt * 256); + /* + * The MBX_READ_STATUS returns tx_k_bytes which has to + * converted to words + */ + hs->tx_words = (uint64_t) + ((uint64_t)pmb->un.varRdStatus.xmitByteCnt + * (uint64_t)256); hs->rx_frames = pmb->un.varRdStatus.rcvFrameCnt; - hs->rx_words = (pmb->un.varRdStatus.rcvByteCnt * 256); + hs->rx_words = (uint64_t) + ((uint64_t)pmb->un.varRdStatus.rcvByteCnt + * (uint64_t)256); memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t)); pmb->mbxCommand = MBX_READ_LNK_STAT; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index a6db6aef1331..60f95347babf 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -209,7 +209,7 @@ void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *); int lpfc_mbox_dev_check(struct lpfc_hba *); -int lpfc_mbox_tmo_val(struct lpfc_hba *, int); +int lpfc_mbox_tmo_val(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *); void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t); void lpfc_init_vpi(struct lpfc_hba *, 
struct lpfcMboxq *, uint16_t); @@ -451,3 +451,5 @@ int lpfc_wr_object(struct lpfc_hba *, struct list_head *, uint32_t, uint32_t *); /* functions to support SR-IOV */ int lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *, int); uint16_t lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *); +int lpfc_sli4_queue_create(struct lpfc_hba *); +void lpfc_sli4_queue_destroy(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 779b88e1469d..707081d0a226 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -1856,6 +1856,9 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag) case 2: c = 'B'; break; + case 3: + c = 'X'; + break; default: c = 0; break; diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a0424dd90e40..2cd844f7058f 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -996,6 +996,85 @@ lpfc_debugfs_dumpDataDif_write(struct file *file, const char __user *buf, return nbytes; } +static int +lpfc_debugfs_dif_err_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t +lpfc_debugfs_dif_err_read(struct file *file, char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct dentry *dent = file->f_dentry; + struct lpfc_hba *phba = file->private_data; + char cbuf[16]; + int cnt = 0; + + if (dent == phba->debug_writeGuard) + cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wgrd_cnt); + else if (dent == phba->debug_writeApp) + cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wapp_cnt); + else if (dent == phba->debug_writeRef) + cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_wref_cnt); + else if (dent == phba->debug_readApp) + cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rapp_cnt); + else if (dent == phba->debug_readRef) + cnt = snprintf(cbuf, 16, "%u\n", phba->lpfc_injerr_rref_cnt); + else if (dent == phba->debug_InjErrLBA) + cnt = snprintf(cbuf, 16, "0x%lx\n", + (unsigned long) phba->lpfc_injerr_lba); + else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0547 Unknown debugfs error injection entry\n"); + + return simple_read_from_buffer(buf, nbytes, ppos, &cbuf, cnt); +} + +static ssize_t +lpfc_debugfs_dif_err_write(struct file *file, const char __user *buf, + size_t nbytes, loff_t *ppos) +{ + struct dentry *dent = file->f_dentry; + struct lpfc_hba *phba = file->private_data; + char dstbuf[32]; + unsigned long tmp; + int size; + + memset(dstbuf, 0, 32); + size = (nbytes < 32) ? nbytes : 32; + if (copy_from_user(dstbuf, buf, size)) + return 0; + + if (strict_strtoul(dstbuf, 0, &tmp)) + return 0; + + if (dent == phba->debug_writeGuard) + phba->lpfc_injerr_wgrd_cnt = (uint32_t)tmp; + else if (dent == phba->debug_writeApp) + phba->lpfc_injerr_wapp_cnt = (uint32_t)tmp; + else if (dent == phba->debug_writeRef) + phba->lpfc_injerr_wref_cnt = (uint32_t)tmp; + else if (dent == phba->debug_readApp) + phba->lpfc_injerr_rapp_cnt = (uint32_t)tmp; + else if (dent == phba->debug_readRef) + phba->lpfc_injerr_rref_cnt = (uint32_t)tmp; + else if (dent == phba->debug_InjErrLBA) + phba->lpfc_injerr_lba = (sector_t)tmp; + else + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "0548 Unknown debugfs error injection entry\n"); + + return nbytes; +} + +static int +lpfc_debugfs_dif_err_release(struct inode *inode, struct file *file) +{ + return 0; +} + /** * lpfc_debugfs_nodelist_open - Open the nodelist debugfs file * @inode: The inode pointer that contains a vport pointer. 
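The debugfs handlers above expose the new T10 DIF error-injection counters as plain read/write files. A minimal user-space sketch of driving them follows; it is illustrative only. The debugfs mount point and the per-HBA directory name ("fn0") are assumptions, while the file names match the debugfs_create_file() calls in the next hunk. Writing a count arms that many injections, and InjErrLBA restricts injection to a specific LBA (the driver resets it to LPFC_INJERR_LBA_OFF after each injected error).

#include <stdio.h>

/* Assumed location of the per-HBA lpfc debugfs directory. */
#define LPFC_DBGFS "/sys/kernel/debug/lpfc/fn0"

static int set_knob(const char *knob, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", LPFC_DBGFS, knob);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* The driver parses this with strict_strtoul(), so "0x..." works. */
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Inject one guard-tag error on the next write covering LBA 0x100. */
	set_knob("InjErrLBA", "0x100");
	set_knob("writeGuardInjErr", "1");
	return 0;
}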
@@ -3380,6 +3459,16 @@ static const struct file_operations lpfc_debugfs_op_dumpDif = { .release = lpfc_debugfs_dumpDataDif_release, }; +#undef lpfc_debugfs_op_dif_err +static const struct file_operations lpfc_debugfs_op_dif_err = { + .owner = THIS_MODULE, + .open = lpfc_debugfs_dif_err_open, + .llseek = lpfc_debugfs_lseek, + .read = lpfc_debugfs_dif_err_read, + .write = lpfc_debugfs_dif_err_write, + .release = lpfc_debugfs_dif_err_release, +}; + #undef lpfc_debugfs_op_slow_ring_trc static const struct file_operations lpfc_debugfs_op_slow_ring_trc = { .owner = THIS_MODULE, @@ -3788,6 +3877,74 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport) goto debug_failed; } + /* Setup DIF Error Injections */ + snprintf(name, sizeof(name), "InjErrLBA"); + phba->debug_InjErrLBA = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_InjErrLBA) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0807 Cannot create debugfs InjErrLBA\n"); + goto debug_failed; + } + phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + + snprintf(name, sizeof(name), "writeGuardInjErr"); + phba->debug_writeGuard = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeGuard) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0802 Cannot create debugfs writeGuard\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "writeAppInjErr"); + phba->debug_writeApp = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeApp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0803 Cannot create debugfs writeApp\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "writeRefInjErr"); + phba->debug_writeRef = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_writeRef) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0804 Cannot create debugfs writeRef\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "readAppInjErr"); + phba->debug_readApp = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_readApp) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0805 Cannot create debugfs readApp\n"); + goto debug_failed; + } + + snprintf(name, sizeof(name), "readRefInjErr"); + phba->debug_readRef = + debugfs_create_file(name, S_IFREG|S_IRUGO|S_IWUSR, + phba->hba_debugfs_root, + phba, &lpfc_debugfs_op_dif_err); + if (!phba->debug_readRef) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, + "0806 Cannot create debugfs readRef\n"); + goto debug_failed; + } + /* Setup slow ring trace */ if (lpfc_debugfs_max_slow_ring_trc) { num = lpfc_debugfs_max_slow_ring_trc - 1; @@ -4090,6 +4247,30 @@ lpfc_debugfs_terminate(struct lpfc_vport *vport) debugfs_remove(phba->debug_dumpDif); /* dumpDif */ phba->debug_dumpDif = NULL; } + if (phba->debug_InjErrLBA) { + debugfs_remove(phba->debug_InjErrLBA); /* InjErrLBA */ + phba->debug_InjErrLBA = NULL; + } + if (phba->debug_writeGuard) { + debugfs_remove(phba->debug_writeGuard); /* writeGuard */ + phba->debug_writeGuard = NULL; + } + if (phba->debug_writeApp) { + debugfs_remove(phba->debug_writeApp); /* writeApp */ + phba->debug_writeApp = NULL; + } + if (phba->debug_writeRef) { + debugfs_remove(phba->debug_writeRef); /* writeRef */ + phba->debug_writeRef = NULL; + } + if (phba->debug_readApp)
{ + debugfs_remove(phba->debug_readApp); /* readApp */ + phba->debug_readApp = NULL; + } + if (phba->debug_readRef) { + debugfs_remove(phba->debug_readRef); /* readRef */ + phba->debug_readRef = NULL; + } if (phba->slow_ring_trc) { kfree(phba->slow_ring_trc); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 023da0e00d38..445826a4c981 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3386,7 +3386,14 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, cmdiocb->context1 = NULL; } } + + /* + * The driver received a LOGO from the rport and has ACK'd it. + * At this point, the driver is done so release the IOCB and + * remove the ndlp reference. + */ lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); return; } @@ -4082,9 +4089,6 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, phba->fc_stat.elsXmitACC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - lpfc_nlp_put(ndlp); - elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, - * it could be freed */ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { @@ -4166,6 +4170,11 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, psli = &phba->sli; cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len; + /* The accumulated length can exceed the BPL_SIZE. For + * now, use this as the limit + */ + if (cmdsize > LPFC_BPL_SIZE) + cmdsize = LPFC_BPL_SIZE; elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) @@ -4189,9 +4198,6 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, phba->fc_stat.elsXmitACC++; elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp; - lpfc_nlp_put(ndlp); - elsiocb->context1 = NULL; /* Don't need ndlp for cmpl, - * it could be freed */ rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { @@ -7258,16 +7264,11 @@ lpfc_issue_els_fdisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, icmd->un.elsreq64.myID = 0; icmd->un.elsreq64.fl = 1; - if ((phba->sli_rev == LPFC_SLI_REV4) && - (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) == - LPFC_SLI_INTF_IF_TYPE_0)) { - /* FDISC needs to be 1 for WQE VPI */ - elsiocb->iocb.ulpCt_h = (SLI4_CT_VPI >> 1) & 1; - elsiocb->iocb.ulpCt_l = SLI4_CT_VPI & 1 ; - /* Set the ulpContext to the vpi */ - elsiocb->iocb.ulpContext = phba->vpi_ids[vport->vpi]; - } else { - /* For FDISC, Let FDISC rsp set the NPortID for this VPI */ + /* + * SLI3 ports require a different context type value than SLI4. + * Catch SLI3 ports here and override the prep. 
+ */ + if (phba->sli_rev == LPFC_SLI_REV3) { icmd->ulpCt_h = 1; icmd->ulpCt_l = 0; } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 0b47adf9fee8..091f68e5cb70 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -1412,7 +1412,7 @@ lpfc_register_fcf(struct lpfc_hba *phba) if (phba->pport->port_state != LPFC_FLOGI) { phba->hba_flag |= FCF_RR_INPROG; spin_unlock_irq(&phba->hbalock); - lpfc_issue_init_vfi(phba->pport); + lpfc_initial_flogi(phba->pport); return; } spin_unlock_irq(&phba->hbalock); @@ -2646,7 +2646,9 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { struct lpfc_vport *vport = mboxq->vport; - if (mboxq->u.mb.mbxStatus && (mboxq->u.mb.mbxStatus != 0x4002)) { + /* VFI not supported on interface type 0, just do the flogi */ + if (mboxq->u.mb.mbxStatus && (bf_get(lpfc_sli_intf_if_type, + &phba->sli4_hba.sli_intf) != LPFC_SLI_INTF_IF_TYPE_0)) { lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX, "2891 Init VFI mailbox failed 0x%x\n", @@ -2655,6 +2657,7 @@ lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) lpfc_vport_set_state(vport, FC_VPORT_FAILED); return; } + lpfc_initial_flogi(vport); mempool_free(mboxq, phba->mbox_mem_pool); return; diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 7f8003b5181e..98d21521f539 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -41,6 +41,8 @@ * Or clear that bit field: * bf_set(example_bit_field, &t1, 0); */ +#define bf_get_be32(name, ptr) \ + ((be32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK) #define bf_get_le32(name, ptr) \ ((le32_to_cpu((ptr)->name##_WORD) >> name##_SHIFT) & name##_MASK) #define bf_get(name, ptr) \ @@ -678,7 +680,6 @@ struct lpfc_register { #define lpfc_rq_doorbell_num_posted_SHIFT 16 #define lpfc_rq_doorbell_num_posted_MASK 0x3FFF #define lpfc_rq_doorbell_num_posted_WORD word0 -#define LPFC_RQ_POST_BATCH 8 /* RQEs to post at one time */ #define lpfc_rq_doorbell_id_SHIFT 0 #define lpfc_rq_doorbell_id_MASK 0xFFFF #define lpfc_rq_doorbell_id_WORD word0 @@ -784,6 +785,8 @@ union lpfc_sli4_cfg_shdr { #define LPFC_Q_CREATE_VERSION_2 2 #define LPFC_Q_CREATE_VERSION_1 1 #define LPFC_Q_CREATE_VERSION_0 0 +#define LPFC_OPCODE_VERSION_0 0 +#define LPFC_OPCODE_VERSION_1 1 } request; struct { uint32_t word6; @@ -825,6 +828,7 @@ struct mbox_header { #define LPFC_EXTENT_VERSION_DEFAULT 0 /* Subsystem Definitions */ +#define LPFC_MBOX_SUBSYSTEM_NA 0x0 #define LPFC_MBOX_SUBSYSTEM_COMMON 0x1 #define LPFC_MBOX_SUBSYSTEM_FCOE 0xC @@ -835,25 +839,34 @@ struct mbox_header { #define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF /* Common Opcodes */ -#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C -#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D -#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 -#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 -#define LPFC_MBOX_OPCODE_NOP 0x21 -#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 -#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 -#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 -#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A -#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D -#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A -#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A -#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B -#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C -#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D -#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0 -#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4 -#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC -#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 
0xB5 +#define LPFC_MBOX_OPCODE_NA 0x00 +#define LPFC_MBOX_OPCODE_CQ_CREATE 0x0C +#define LPFC_MBOX_OPCODE_EQ_CREATE 0x0D +#define LPFC_MBOX_OPCODE_MQ_CREATE 0x15 +#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES 0x20 +#define LPFC_MBOX_OPCODE_NOP 0x21 +#define LPFC_MBOX_OPCODE_MQ_DESTROY 0x35 +#define LPFC_MBOX_OPCODE_CQ_DESTROY 0x36 +#define LPFC_MBOX_OPCODE_EQ_DESTROY 0x37 +#define LPFC_MBOX_OPCODE_QUERY_FW_CFG 0x3A +#define LPFC_MBOX_OPCODE_FUNCTION_RESET 0x3D +#define LPFC_MBOX_OPCODE_GET_PORT_NAME 0x4D +#define LPFC_MBOX_OPCODE_MQ_CREATE_EXT 0x5A +#define LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO 0x9A +#define LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT 0x9B +#define LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT 0x9C +#define LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT 0x9D +#define LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG 0xA0 +#define LPFC_MBOX_OPCODE_GET_PROFILE_CONFIG 0xA4 +#define LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG 0xA5 +#define LPFC_MBOX_OPCODE_GET_PROFILE_LIST 0xA6 +#define LPFC_MBOX_OPCODE_SET_ACT_PROFILE 0xA8 +#define LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG 0xA9 +#define LPFC_MBOX_OPCODE_READ_OBJECT 0xAB +#define LPFC_MBOX_OPCODE_WRITE_OBJECT 0xAC +#define LPFC_MBOX_OPCODE_READ_OBJECT_LIST 0xAD +#define LPFC_MBOX_OPCODE_DELETE_OBJECT 0xAE +#define LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS 0xB5 /* FCoE Opcodes */ #define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE 0x01 @@ -867,6 +880,7 @@ struct mbox_header { #define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF 0x0A #define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE 0x0B #define LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF 0x10 +#define LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS 0x21 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE 0x22 #define LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK 0x23 @@ -1470,16 +1484,81 @@ struct sli4_sge { /* SLI-4 */ uint32_t addr_lo; uint32_t word2; -#define lpfc_sli4_sge_offset_SHIFT 0 /* Offset of buffer - Not used*/ -#define lpfc_sli4_sge_offset_MASK 0x1FFFFFFF +#define lpfc_sli4_sge_offset_SHIFT 0 +#define lpfc_sli4_sge_offset_MASK 0x07FFFFFF #define lpfc_sli4_sge_offset_WORD word2 -#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets - this flag !! 
*/ +#define lpfc_sli4_sge_type_SHIFT 27 +#define lpfc_sli4_sge_type_MASK 0x0000000F +#define lpfc_sli4_sge_type_WORD word2 +#define LPFC_SGE_TYPE_DATA 0x0 +#define LPFC_SGE_TYPE_DIF 0x4 +#define LPFC_SGE_TYPE_LSP 0x5 +#define LPFC_SGE_TYPE_PEDIF 0x6 +#define LPFC_SGE_TYPE_PESEED 0x7 +#define LPFC_SGE_TYPE_DISEED 0x8 +#define LPFC_SGE_TYPE_ENC 0x9 +#define LPFC_SGE_TYPE_ATM 0xA +#define LPFC_SGE_TYPE_SKIP 0xC +#define lpfc_sli4_sge_last_SHIFT 31 /* Last SEG in the SGL sets it */ #define lpfc_sli4_sge_last_MASK 0x00000001 #define lpfc_sli4_sge_last_WORD word2 uint32_t sge_len; }; +struct sli4_sge_diseed { /* SLI-4 */ + uint32_t ref_tag; + uint32_t ref_tag_tran; + + uint32_t word2; +#define lpfc_sli4_sge_dif_apptran_SHIFT 0 +#define lpfc_sli4_sge_dif_apptran_MASK 0x0000FFFF +#define lpfc_sli4_sge_dif_apptran_WORD word2 +#define lpfc_sli4_sge_dif_af_SHIFT 24 +#define lpfc_sli4_sge_dif_af_MASK 0x00000001 +#define lpfc_sli4_sge_dif_af_WORD word2 +#define lpfc_sli4_sge_dif_na_SHIFT 25 +#define lpfc_sli4_sge_dif_na_MASK 0x00000001 +#define lpfc_sli4_sge_dif_na_WORD word2 +#define lpfc_sli4_sge_dif_hi_SHIFT 26 +#define lpfc_sli4_sge_dif_hi_MASK 0x00000001 +#define lpfc_sli4_sge_dif_hi_WORD word2 +#define lpfc_sli4_sge_dif_type_SHIFT 27 +#define lpfc_sli4_sge_dif_type_MASK 0x0000000F +#define lpfc_sli4_sge_dif_type_WORD word2 +#define lpfc_sli4_sge_dif_last_SHIFT 31 /* Last SEG in the SGL sets it */ +#define lpfc_sli4_sge_dif_last_MASK 0x00000001 +#define lpfc_sli4_sge_dif_last_WORD word2 + uint32_t word3; +#define lpfc_sli4_sge_dif_apptag_SHIFT 0 +#define lpfc_sli4_sge_dif_apptag_MASK 0x0000FFFF +#define lpfc_sli4_sge_dif_apptag_WORD word3 +#define lpfc_sli4_sge_dif_bs_SHIFT 16 +#define lpfc_sli4_sge_dif_bs_MASK 0x00000007 +#define lpfc_sli4_sge_dif_bs_WORD word3 +#define lpfc_sli4_sge_dif_ai_SHIFT 19 +#define lpfc_sli4_sge_dif_ai_MASK 0x00000001 +#define lpfc_sli4_sge_dif_ai_WORD word3 +#define lpfc_sli4_sge_dif_me_SHIFT 20 +#define lpfc_sli4_sge_dif_me_MASK 0x00000001 +#define lpfc_sli4_sge_dif_me_WORD word3 +#define lpfc_sli4_sge_dif_re_SHIFT 21 +#define lpfc_sli4_sge_dif_re_MASK 0x00000001 +#define lpfc_sli4_sge_dif_re_WORD word3 +#define lpfc_sli4_sge_dif_ce_SHIFT 22 +#define lpfc_sli4_sge_dif_ce_MASK 0x00000001 +#define lpfc_sli4_sge_dif_ce_WORD word3 +#define lpfc_sli4_sge_dif_nr_SHIFT 23 +#define lpfc_sli4_sge_dif_nr_MASK 0x00000001 +#define lpfc_sli4_sge_dif_nr_WORD word3 +#define lpfc_sli4_sge_dif_oprx_SHIFT 24 +#define lpfc_sli4_sge_dif_oprx_MASK 0x0000000F +#define lpfc_sli4_sge_dif_oprx_WORD word3 +#define lpfc_sli4_sge_dif_optx_SHIFT 28 +#define lpfc_sli4_sge_dif_optx_MASK 0x0000000F +#define lpfc_sli4_sge_dif_optx_WORD word3 +/* optx and oprx use BG_OP_IN defines in lpfc_hw.h */ +}; + struct fcf_record { uint32_t max_rcv_size; uint32_t fka_adv_period; @@ -2019,6 +2098,15 @@ struct lpfc_mbx_read_config { #define lpfc_mbx_rd_conf_extnts_inuse_MASK 0x00000001 #define lpfc_mbx_rd_conf_extnts_inuse_WORD word1 uint32_t word2; +#define lpfc_mbx_rd_conf_lnk_numb_SHIFT 0 +#define lpfc_mbx_rd_conf_lnk_numb_MASK 0x0000003F +#define lpfc_mbx_rd_conf_lnk_numb_WORD word2 +#define lpfc_mbx_rd_conf_lnk_type_SHIFT 6 +#define lpfc_mbx_rd_conf_lnk_type_MASK 0x00000003 +#define lpfc_mbx_rd_conf_lnk_type_WORD word2 +#define lpfc_mbx_rd_conf_lnk_ldv_SHIFT 8 +#define lpfc_mbx_rd_conf_lnk_ldv_MASK 0x00000001 +#define lpfc_mbx_rd_conf_lnk_ldv_WORD word2 #define lpfc_mbx_rd_conf_topology_SHIFT 24 #define lpfc_mbx_rd_conf_topology_MASK 0x000000FF #define lpfc_mbx_rd_conf_topology_WORD word2 @@ -2552,8 
+2640,152 @@ struct lpfc_mbx_get_prof_cfg { } u; }; +struct lpfc_controller_attribute { + uint32_t version_string[8]; + uint32_t manufacturer_name[8]; + uint32_t supported_modes; + uint32_t word17; +#define lpfc_cntl_attr_eprom_ver_lo_SHIFT 0 +#define lpfc_cntl_attr_eprom_ver_lo_MASK 0x000000ff +#define lpfc_cntl_attr_eprom_ver_lo_WORD word17 +#define lpfc_cntl_attr_eprom_ver_hi_SHIFT 8 +#define lpfc_cntl_attr_eprom_ver_hi_MASK 0x000000ff +#define lpfc_cntl_attr_eprom_ver_hi_WORD word17 + uint32_t mbx_da_struct_ver; + uint32_t ep_fw_da_struct_ver; + uint32_t ncsi_ver_str[3]; + uint32_t dflt_ext_timeout; + uint32_t model_number[8]; + uint32_t description[16]; + uint32_t serial_number[8]; + uint32_t ip_ver_str[8]; + uint32_t fw_ver_str[8]; + uint32_t bios_ver_str[8]; + uint32_t redboot_ver_str[8]; + uint32_t driver_ver_str[8]; + uint32_t flash_fw_ver_str[8]; + uint32_t functionality; + uint32_t word105; +#define lpfc_cntl_attr_max_cbd_len_SHIFT 0 +#define lpfc_cntl_attr_max_cbd_len_MASK 0x0000ffff +#define lpfc_cntl_attr_max_cbd_len_WORD word105 +#define lpfc_cntl_attr_asic_rev_SHIFT 16 +#define lpfc_cntl_attr_asic_rev_MASK 0x000000ff +#define lpfc_cntl_attr_asic_rev_WORD word105 +#define lpfc_cntl_attr_gen_guid0_SHIFT 24 +#define lpfc_cntl_attr_gen_guid0_MASK 0x000000ff +#define lpfc_cntl_attr_gen_guid0_WORD word105 + uint32_t gen_guid1_12[3]; + uint32_t word109; +#define lpfc_cntl_attr_gen_guid13_14_SHIFT 0 +#define lpfc_cntl_attr_gen_guid13_14_MASK 0x0000ffff +#define lpfc_cntl_attr_gen_guid13_14_WORD word109 +#define lpfc_cntl_attr_gen_guid15_SHIFT 16 +#define lpfc_cntl_attr_gen_guid15_MASK 0x000000ff +#define lpfc_cntl_attr_gen_guid15_WORD word109 +#define lpfc_cntl_attr_hba_port_cnt_SHIFT 24 +#define lpfc_cntl_attr_hba_port_cnt_MASK 0x000000ff +#define lpfc_cntl_attr_hba_port_cnt_WORD word109 + uint32_t word110; +#define lpfc_cntl_attr_dflt_lnk_tmo_SHIFT 0 +#define lpfc_cntl_attr_dflt_lnk_tmo_MASK 0x0000ffff +#define lpfc_cntl_attr_dflt_lnk_tmo_WORD word110 +#define lpfc_cntl_attr_multi_func_dev_SHIFT 24 +#define lpfc_cntl_attr_multi_func_dev_MASK 0x000000ff +#define lpfc_cntl_attr_multi_func_dev_WORD word110 + uint32_t word111; +#define lpfc_cntl_attr_cache_valid_SHIFT 0 +#define lpfc_cntl_attr_cache_valid_MASK 0x000000ff +#define lpfc_cntl_attr_cache_valid_WORD word111 +#define lpfc_cntl_attr_hba_status_SHIFT 8 +#define lpfc_cntl_attr_hba_status_MASK 0x000000ff +#define lpfc_cntl_attr_hba_status_WORD word111 +#define lpfc_cntl_attr_max_domain_SHIFT 16 +#define lpfc_cntl_attr_max_domain_MASK 0x000000ff +#define lpfc_cntl_attr_max_domain_WORD word111 +#define lpfc_cntl_attr_lnk_numb_SHIFT 24 +#define lpfc_cntl_attr_lnk_numb_MASK 0x0000003f +#define lpfc_cntl_attr_lnk_numb_WORD word111 +#define lpfc_cntl_attr_lnk_type_SHIFT 30 +#define lpfc_cntl_attr_lnk_type_MASK 0x00000003 +#define lpfc_cntl_attr_lnk_type_WORD word111 + uint32_t fw_post_status; + uint32_t hba_mtu[8]; + uint32_t word121; + uint32_t reserved1[3]; + uint32_t word125; +#define lpfc_cntl_attr_pci_vendor_id_SHIFT 0 +#define lpfc_cntl_attr_pci_vendor_id_MASK 0x0000ffff +#define lpfc_cntl_attr_pci_vendor_id_WORD word125 +#define lpfc_cntl_attr_pci_device_id_SHIFT 16 +#define lpfc_cntl_attr_pci_device_id_MASK 0x0000ffff +#define lpfc_cntl_attr_pci_device_id_WORD word125 + uint32_t word126; +#define lpfc_cntl_attr_pci_subvdr_id_SHIFT 0 +#define lpfc_cntl_attr_pci_subvdr_id_MASK 0x0000ffff +#define lpfc_cntl_attr_pci_subvdr_id_WORD word126 +#define lpfc_cntl_attr_pci_subsys_id_SHIFT 16 +#define lpfc_cntl_attr_pci_subsys_id_MASK 
0x0000ffff +#define lpfc_cntl_attr_pci_subsys_id_WORD word126 + uint32_t word127; +#define lpfc_cntl_attr_pci_bus_num_SHIFT 0 +#define lpfc_cntl_attr_pci_bus_num_MASK 0x000000ff +#define lpfc_cntl_attr_pci_bus_num_WORD word127 +#define lpfc_cntl_attr_pci_dev_num_SHIFT 8 +#define lpfc_cntl_attr_pci_dev_num_MASK 0x000000ff +#define lpfc_cntl_attr_pci_dev_num_WORD word127 +#define lpfc_cntl_attr_pci_fnc_num_SHIFT 16 +#define lpfc_cntl_attr_pci_fnc_num_MASK 0x000000ff +#define lpfc_cntl_attr_pci_fnc_num_WORD word127 +#define lpfc_cntl_attr_inf_type_SHIFT 24 +#define lpfc_cntl_attr_inf_type_MASK 0x000000ff +#define lpfc_cntl_attr_inf_type_WORD word127 + uint32_t unique_id[2]; + uint32_t word130; +#define lpfc_cntl_attr_num_netfil_SHIFT 0 +#define lpfc_cntl_attr_num_netfil_MASK 0x000000ff +#define lpfc_cntl_attr_num_netfil_WORD word130 + uint32_t reserved2[4]; +}; + +struct lpfc_mbx_get_cntl_attributes { + union lpfc_sli4_cfg_shdr cfg_shdr; + struct lpfc_controller_attribute cntl_attr; +}; + +struct lpfc_mbx_get_port_name { + struct mbox_header header; + union { + struct { + uint32_t word4; +#define lpfc_mbx_get_port_name_lnk_type_SHIFT 0 +#define lpfc_mbx_get_port_name_lnk_type_MASK 0x00000003 +#define lpfc_mbx_get_port_name_lnk_type_WORD word4 + } request; + struct { + uint32_t word4; +#define lpfc_mbx_get_port_name_name0_SHIFT 0 +#define lpfc_mbx_get_port_name_name0_MASK 0x000000FF +#define lpfc_mbx_get_port_name_name0_WORD word4 +#define lpfc_mbx_get_port_name_name1_SHIFT 8 +#define lpfc_mbx_get_port_name_name1_MASK 0x000000FF +#define lpfc_mbx_get_port_name_name1_WORD word4 +#define lpfc_mbx_get_port_name_name2_SHIFT 16 +#define lpfc_mbx_get_port_name_name2_MASK 0x000000FF +#define lpfc_mbx_get_port_name_name2_WORD word4 +#define lpfc_mbx_get_port_name_name3_SHIFT 24 +#define lpfc_mbx_get_port_name_name3_MASK 0x000000FF +#define lpfc_mbx_get_port_name_name3_WORD word4 +#define LPFC_LINK_NUMBER_0 0 +#define LPFC_LINK_NUMBER_1 1 +#define LPFC_LINK_NUMBER_2 2 +#define LPFC_LINK_NUMBER_3 3 + } response; + } u; +}; + /* Mailbox Completion Queue Error Messages */ -#define MB_CQE_STATUS_SUCCESS 0x0 +#define MB_CQE_STATUS_SUCCESS 0x0 #define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES 0x1 #define MB_CQE_STATUS_INVALID_PARAMETER 0x2 #define MB_CQE_STATUS_INSUFFICIENT_RESOURCES 0x3 @@ -2637,8 +2869,9 @@ struct lpfc_mqe { struct lpfc_mbx_run_link_diag_test link_diag_test; struct lpfc_mbx_get_func_cfg get_func_cfg; struct lpfc_mbx_get_prof_cfg get_prof_cfg; - struct lpfc_mbx_nop nop; struct lpfc_mbx_wr_object wr_object; + struct lpfc_mbx_get_port_name get_port_name; + struct lpfc_mbx_nop nop; } un; }; @@ -2855,6 +3088,9 @@ struct wqe_common { #define wqe_ctxt_tag_MASK 0x0000FFFF #define wqe_ctxt_tag_WORD word6 uint32_t word7; +#define wqe_dif_SHIFT 0 +#define wqe_dif_MASK 0x00000003 +#define wqe_dif_WORD word7 #define wqe_ct_SHIFT 2 #define wqe_ct_MASK 0x00000003 #define wqe_ct_WORD word7 @@ -2867,12 +3103,21 @@ struct wqe_common { #define wqe_class_SHIFT 16 #define wqe_class_MASK 0x00000007 #define wqe_class_WORD word7 +#define wqe_ar_SHIFT 19 +#define wqe_ar_MASK 0x00000001 +#define wqe_ar_WORD word7 +#define wqe_ag_SHIFT wqe_ar_SHIFT +#define wqe_ag_MASK wqe_ar_MASK +#define wqe_ag_WORD wqe_ar_WORD #define wqe_pu_SHIFT 20 #define wqe_pu_MASK 0x00000003 #define wqe_pu_WORD word7 #define wqe_erp_SHIFT 22 #define wqe_erp_MASK 0x00000001 #define wqe_erp_WORD word7 +#define wqe_conf_SHIFT wqe_erp_SHIFT +#define wqe_conf_MASK wqe_erp_MASK +#define wqe_conf_WORD wqe_erp_WORD #define wqe_lnk_SHIFT 23 #define 
wqe_lnk_MASK 0x00000001 #define wqe_lnk_WORD word7 @@ -2931,6 +3176,9 @@ struct wqe_common { #define wqe_xc_SHIFT 21 #define wqe_xc_MASK 0x00000001 #define wqe_xc_WORD word10 +#define wqe_sr_SHIFT 22 +#define wqe_sr_MASK 0x00000001 +#define wqe_sr_WORD word10 #define wqe_ccpe_SHIFT 23 #define wqe_ccpe_MASK 0x00000001 #define wqe_ccpe_WORD word10 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index a3c820083c36..907c94b9245d 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -58,8 +58,7 @@ spinlock_t _dump_buf_lock; static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *); static int lpfc_post_rcv_buf(struct lpfc_hba *); -static int lpfc_sli4_queue_create(struct lpfc_hba *); -static void lpfc_sli4_queue_destroy(struct lpfc_hba *); +static int lpfc_sli4_queue_verify(struct lpfc_hba *); static int lpfc_create_bootstrap_mbox(struct lpfc_hba *); static int lpfc_setup_endian_order(struct lpfc_hba *); static int lpfc_sli4_read_config(struct lpfc_hba *); @@ -1438,6 +1437,7 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) struct Scsi_Host *shost; uint32_t if_type; struct lpfc_register portstat_reg; + int rc; /* If the pci channel is offline, ignore possible errors, since * we cannot communicate with the pci card anyway. @@ -1480,16 +1480,24 @@ lpfc_handle_eratt_s4(struct lpfc_hba *phba) lpfc_sli4_offline_eratt(phba); return; } - if (bf_get(lpfc_sliport_status_rn, &portstat_reg)) { - /* - * TODO: Attempt port recovery via a port reset. - * When fully implemented, the driver should - * attempt to recover the port here and return. - * For now, log an error and take the port offline. - */ + /* + * On error status condition, the driver needs to wait for the port + * to be ready before performing a reset.
+ */ + rc = lpfc_sli4_pdev_status_reg_wait(phba); + if (!rc) { + /* need reset: attempt for port recovery */ lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2887 Port Error: Attempting " "Port Recovery\n"); + lpfc_offline_prep(phba); + lpfc_offline(phba); + lpfc_sli_brdrestart(phba); + if (lpfc_online(phba) == 0) { + lpfc_unblock_mgmt_io(phba); + return; + } + /* fall through for not able to recover */ } lpfc_sli4_offline_eratt(phba); break; @@ -1724,11 +1732,20 @@ lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len) j = 0; Length -= (3+i); while(i--) { - phba->Port[j++] = vpd[index++]; - if (j == 19) - break; + if ((phba->sli_rev == LPFC_SLI_REV4) && + (phba->sli4_hba.pport_name_sta == + LPFC_SLI4_PPNAME_GET)) { + j++; + index++; + } else + phba->Port[j++] = vpd[index++]; + if (j == 19) + break; } - phba->Port[j] = 0; + if ((phba->sli_rev != LPFC_SLI_REV4) || + (phba->sli4_hba.pport_name_sta == + LPFC_SLI4_PPNAME_NON)) + phba->Port[j] = 0; continue; } else { @@ -1958,7 +1975,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp) case PCI_DEVICE_ID_LANCER_FCOE: case PCI_DEVICE_ID_LANCER_FCOE_VF: oneConnect = 1; - m = (typeof(m)){"OCe50100", "PCIe", "FCoE"}; + m = (typeof(m)){"OCe15100", "PCIe", "FCoE"}; break; default: m = (typeof(m)){"Unknown", "", ""}; @@ -2432,17 +2449,19 @@ lpfc_block_mgmt_io(struct lpfc_hba * phba) uint8_t actcmd = MBX_HEARTBEAT; unsigned long timeout; - + timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; spin_lock_irqsave(&phba->hbalock, iflag); phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO; - if (phba->sli.mbox_active) + if (phba->sli.mbox_active) { actcmd = phba->sli.mbox_active->u.mb.mbxCommand; + /* Determine how long we might wait for the active mailbox + * command to be gracefully completed by firmware. + */ + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, + phba->sli.mbox_active) * 1000) + jiffies; + } spin_unlock_irqrestore(&phba->hbalock, iflag); - /* Determine how long we might wait for the active mailbox - * command to be gracefully completed by firmware. 
- */ - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) + - jiffies; + /* Wait for the outstanding mailbox command to complete */ while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ @@ -3949,7 +3968,7 @@ static int lpfc_enable_pci_dev(struct lpfc_hba *phba) { struct pci_dev *pdev; - int bars; + int bars = 0; /* Obtain PCI device reference */ if (!phba->pcidev) @@ -3978,6 +3997,8 @@ lpfc_enable_pci_dev(struct lpfc_hba *phba) out_disable_device: pci_disable_device(pdev); out_error: + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "1401 Failed to enable pci device, bars:x%x\n", bars); return -ENODEV; } @@ -4051,9 +4072,6 @@ lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba) uint16_t nr_virtfn; int pos; - if (!pdev->is_physfn) - return 0; - pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); if (pos == 0) return 0; @@ -4474,15 +4492,15 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) } } mempool_free(mboxq, phba->mbox_mem_pool); - /* Create all the SLI4 queues */ - rc = lpfc_sli4_queue_create(phba); + /* Verify all the SLI4 queues */ + rc = lpfc_sli4_queue_verify(phba); if (rc) goto out_free_bsmbx; /* Create driver internal CQE event pool */ rc = lpfc_sli4_cq_event_pool_create(phba); if (rc) - goto out_destroy_queue; + goto out_free_bsmbx; /* Initialize and populate the iocb list per host */ rc = lpfc_init_sgl_list(phba); @@ -4516,14 +4534,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) goto out_remove_rpi_hdrs; } - phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * + /* + * The cfg_fcp_eq_count can be zero whenever there is exactly one + * interrupt vector. This is not an error + */ + if (phba->cfg_fcp_eq_count) { + phba->sli4_hba.fcp_eq_hdl = + kzalloc((sizeof(struct lpfc_fcp_eq_hdl) * phba->cfg_fcp_eq_count), GFP_KERNEL); - if (!phba->sli4_hba.fcp_eq_hdl) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2572 Failed allocate memory for fast-path " - "per-EQ handle array\n"); - rc = -ENOMEM; - goto out_free_fcf_rr_bmask; + if (!phba->sli4_hba.fcp_eq_hdl) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2572 Failed allocate memory for " + "fast-path per-EQ handle array\n"); + rc = -ENOMEM; + goto out_free_fcf_rr_bmask; + } } phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) * @@ -4567,8 +4592,6 @@ out_free_sgl_list: lpfc_free_sgl_list(phba); out_destroy_cq_event_pool: lpfc_sli4_cq_event_pool_destroy(phba); -out_destroy_queue: - lpfc_sli4_queue_destroy(phba); out_free_bsmbx: lpfc_destroy_bootstrap_mbox(phba); out_free_mem: @@ -4608,9 +4631,6 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba) /* Free the SCSI sgl management array */ kfree(phba->sli4_hba.lpfc_scsi_psb_array); - /* Free the SLI4 queues */ - lpfc_sli4_queue_destroy(phba); - /* Free the completion queue EQ event pool */ lpfc_sli4_cq_event_release_all(phba); lpfc_sli4_cq_event_pool_destroy(phba); @@ -6139,24 +6159,21 @@ lpfc_setup_endian_order(struct lpfc_hba *phba) } /** - * lpfc_sli4_queue_create - Create all the SLI4 queues + * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts * @phba: pointer to lpfc hba data structure. * - * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA - * operation. For each SLI4 queue type, the parameters such as queue entry - * count (queue depth) shall be taken from the module parameter. For now, - * we just use some constant number as place holder. + * This routine is invoked to check the user settable queue counts for EQs and + * CQs.
After this routine is called, the counts will be set to valid values that + * adhere to the constraints of the system's interrupt vectors and the port's + * queue resources. * * Return codes * 0 - successful * -ENOMEM - No available memory - * -EIO - The mailbox failed to complete successfully. **/ static int -lpfc_sli4_queue_create(struct lpfc_hba *phba) +lpfc_sli4_queue_verify(struct lpfc_hba *phba) { - struct lpfc_queue *qdesc; - int fcp_eqidx, fcp_cqidx, fcp_wqidx; int cfg_fcp_wq_count; int cfg_fcp_eq_count; @@ -6229,14 +6246,43 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) /* The overall number of event queues used */ phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF; - /* - * Create Event Queues (EQs) - */ - /* Get EQ depth from module parameter, fake the default for now */ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B; phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT; + /* Get CQ depth from module parameter, fake the default for now */ + phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; + phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; + + return 0; +out_error: + return -ENOMEM; +} + +/** + * lpfc_sli4_queue_create - Create all the SLI4 queues + * @phba: pointer to lpfc hba data structure. + * + * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA + * operation. For each SLI4 queue type, the parameters such as queue entry + * count (queue depth) shall be taken from the module parameter. For now, + * we just use some constant number as place holder. + * + * Return codes + * 0 - successful + * -ENOMEM - No available memory + * -EIO - The mailbox failed to complete successfully. + **/ +int +lpfc_sli4_queue_create(struct lpfc_hba *phba) +{ + struct lpfc_queue *qdesc; + int fcp_eqidx, fcp_cqidx, fcp_wqidx; + + /* + * Create Event Queues (EQs) + */ + /* Create slow path event queue */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, phba->sli4_hba.eq_ecount); @@ -6247,14 +6293,20 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) } phba->sli4_hba.sp_eq = qdesc; - /* Create fast-path FCP Event Queue(s) */ - phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_eq_count), GFP_KERNEL); - if (!phba->sli4_hba.fp_eq) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "2576 Failed allocate memory for fast-path " - "EQ record array\n"); - goto out_free_sp_eq; + /* + * Create fast-path FCP Event Queue(s). The cfg_fcp_eq_count can be + * zero whenever there is exactly one interrupt vector. This is not + * an error.
+ */ + if (phba->cfg_fcp_eq_count) { + phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_eq_count), GFP_KERNEL); + if (!phba->sli4_hba.fp_eq) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "2576 Failed allocate memory for " + "fast-path EQ record array\n"); + goto out_free_sp_eq; + } } for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) { qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize, @@ -6271,10 +6323,6 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) * Create Complete Queues (CQs) */ - /* Get CQ depth from module parameter, fake the default for now */ - phba->sli4_hba.cq_esize = LPFC_CQE_SIZE; - phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT; - /* Create slow-path Mailbox Command Complete Queue */ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount); @@ -6296,16 +6344,25 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) phba->sli4_hba.els_cq = qdesc; - /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */ - phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * - phba->cfg_fcp_eq_count), GFP_KERNEL); + /* + * Create fast-path FCP Completion Queue(s), one-to-one with FCP EQs. + * If there are no FCP EQs then create exactly one FCP CQ. + */ + if (phba->cfg_fcp_eq_count) + phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) * + phba->cfg_fcp_eq_count), + GFP_KERNEL); + else + phba->sli4_hba.fcp_cq = kzalloc(sizeof(struct lpfc_queue *), + GFP_KERNEL); if (!phba->sli4_hba.fcp_cq) { lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "2577 Failed allocate memory for fast-path " "CQ record array\n"); goto out_free_els_cq; } - for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) { + fcp_cqidx = 0; + do { qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize, phba->sli4_hba.cq_ecount); if (!qdesc) { @@ -6315,7 +6372,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba) goto out_free_fcp_cq; } phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc; - } + } while (++fcp_cqidx < phba->cfg_fcp_eq_count); /* Create Mailbox Command Queue */ phba->sli4_hba.mq_esize = LPFC_MQE_SIZE; @@ -6447,7 +6504,7 @@ out_error: * -ENOMEM - No available memory * -EIO - The mailbox failed to complete successfully. 
**/ -static void +void lpfc_sli4_queue_destroy(struct lpfc_hba *phba) { int fcp_qidx; @@ -6723,6 +6780,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) "0540 Receive Queue not allocated\n"); goto out_destroy_fcp_wq; } + + lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ); + lpfc_rq_adjust_repost(phba, phba->sli4_hba.dat_rq, LPFC_ELS_HBQ); + rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq, phba->sli4_hba.els_cq, LPFC_USOL); if (rc) { @@ -6731,6 +6792,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba) "rc = 0x%x\n", rc); goto out_destroy_fcp_wq; } + lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d " "parent cq-id=%d\n", @@ -6790,8 +6852,10 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba) /* Unset ELS complete queue */ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq); /* Unset FCP response complete queue */ - for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) + fcp_qidx = 0; + do { lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]); + } while (++fcp_qidx < phba->cfg_fcp_eq_count); /* Unset fast-path event queue */ for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++) lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]); @@ -7040,10 +7104,11 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) * the loop again. */ for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) { + msleep(10); if (lpfc_readl(phba->sli4_hba.u.if_type2. STATUSregaddr, ®_data.word0)) { rc = -ENODEV; - break; + goto out; } if (bf_get(lpfc_sliport_status_rdy, ®_data)) break; @@ -7051,7 +7116,6 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) reset_again++; break; } - msleep(10); } /* @@ -7065,11 +7129,6 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) } /* Detect any port errors. */ - if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr, - ®_data.word0)) { - rc = -ENODEV; - break; - } if ((bf_get(lpfc_sliport_status_err, ®_data)) || (rdy_chk >= 1000)) { phba->work_status[0] = readl( @@ -7102,6 +7161,7 @@ lpfc_pci_function_reset(struct lpfc_hba *phba) break; } +out: /* Catch the not-ready port failure after a port reset. 
*/ if (num_resets >= MAX_IF_TYPE_2_RESETS) rc = -ENODEV; @@ -7149,12 +7209,13 @@ lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt) lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON, LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED); - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); for (cmdsent = 0; cmdsent < cnt; cmdsent++) { if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - else + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + } if (rc == MBX_TIMEOUT) break; /* Check return status */ @@ -7974,6 +8035,7 @@ lpfc_sli4_unset_hba(struct lpfc_hba *phba) /* Reset SLI4 HBA FCoE function */ lpfc_pci_function_reset(phba); + lpfc_sli4_queue_destroy(phba); return; } @@ -8087,6 +8149,7 @@ lpfc_sli4_hba_unset(struct lpfc_hba *phba) /* Reset SLI4 HBA FCoE function */ lpfc_pci_function_reset(phba); + lpfc_sli4_queue_destroy(phba); /* Stop the SLI4 device port */ phba->pport->work_port_events = 0; @@ -8120,7 +8183,7 @@ lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_PORT_CAPABILITIES); + mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); } @@ -8182,6 +8245,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) int rc; struct lpfc_mqe *mqe = &mboxq->u.mqe; struct lpfc_pc_sli4_params *sli4_params; + uint32_t mbox_tmo; int length; struct lpfc_sli4_parameters *mbx_sli4_parameters; @@ -8200,9 +8264,10 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) length, LPFC_SLI4_MBX_EMBED); if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); - else - rc = lpfc_sli_issue_mbox_wait(phba, mboxq, - lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG)); + else { + mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); + rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); + } if (unlikely(rc)) return rc; sli4_params = &phba->sli4_hba.pc_sli4_params; @@ -8271,11 +8336,8 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) /* Perform generic PCI device enabling operation */ error = lpfc_enable_pci_dev(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1401 Failed to enable pci device.\n"); + if (error) goto out_free_phba; - } /* Set up SLI API function jump table for PCI-device group-0 HBAs */ error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP); @@ -8322,6 +8384,9 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_iocb_list; } + /* Get the default values for Model Name and Description */ + lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); + /* Create SCSI host to the physical port */ error = lpfc_create_shost(phba); if (error) { @@ -8885,16 +8950,17 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) uint32_t offset = 0, temp_offset = 0; INIT_LIST_HEAD(&dma_buffer_list); - if ((image->magic_number != LPFC_GROUP_OJECT_MAGIC_NUM) || - (bf_get(lpfc_grp_hdr_file_type, image) != LPFC_FILE_TYPE_GROUP) || - (bf_get(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || - (image->size != fw->size)) { + if ((be32_to_cpu(image->magic_number) != LPFC_GROUP_OJECT_MAGIC_NUM) || + (bf_get_be32(lpfc_grp_hdr_file_type, image) != + LPFC_FILE_TYPE_GROUP) || + (bf_get_be32(lpfc_grp_hdr_id, image) != LPFC_FILE_ID_GROUP) || + (be32_to_cpu(image->size) != fw->size)) { 
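	/*
	 * The grp_hdr words are big-endian in the firmware file, hence the
	 * be32_to_cpu()/bf_get_be32() conversions in the check above; a valid
	 * image read with the CPU-endian bf_get() accessors would otherwise
	 * fail this test on little-endian hosts.
	 */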
lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "3022 Invalid FW image found. " - "Magic:%d Type:%x ID:%x\n", - image->magic_number, - bf_get(lpfc_grp_hdr_file_type, image), - bf_get(lpfc_grp_hdr_id, image)); + "Magic:%x Type:%x ID:%x\n", + be32_to_cpu(image->magic_number), + bf_get_be32(lpfc_grp_hdr_file_type, image), + bf_get_be32(lpfc_grp_hdr_id, image)); return -EINVAL; } lpfc_decode_firmware_rev(phba, fwrev, 1); @@ -8924,11 +8990,11 @@ lpfc_write_firmware(struct lpfc_hba *phba, const struct firmware *fw) while (offset < fw->size) { temp_offset = offset; list_for_each_entry(dmabuf, &dma_buffer_list, list) { - if (offset + SLI4_PAGE_SIZE > fw->size) { - temp_offset += fw->size - offset; + if (temp_offset + SLI4_PAGE_SIZE > fw->size) { memcpy(dmabuf->virt, fw->data + temp_offset, - fw->size - offset); + fw->size - temp_offset); + temp_offset = fw->size; break; } memcpy(dmabuf->virt, fw->data + temp_offset, @@ -8984,7 +9050,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) uint32_t cfg_mode, intr_mode; int mcnt; int adjusted_fcp_eq_count; - int fcp_qidx; const struct firmware *fw; uint8_t file_name[16]; @@ -8995,11 +9060,8 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) /* Perform generic PCI device enabling operation */ error = lpfc_enable_pci_dev(phba); - if (error) { - lpfc_printf_log(phba, KERN_ERR, LOG_INIT, - "1409 Failed to enable pci device.\n"); + if (error) goto out_free_phba; - } /* Set up SLI API function jump table for PCI-device group-1 HBAs */ error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC); @@ -9054,6 +9116,9 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) goto out_free_iocb_list; } + /* Get the default values for Model Name and Description */ + lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc); + /* Create SCSI host to the physical port */ error = lpfc_create_shost(phba); if (error) { @@ -9093,16 +9158,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid) adjusted_fcp_eq_count = phba->sli4_hba.msix_vec_nr - 1; else adjusted_fcp_eq_count = phba->cfg_fcp_eq_count; - /* Free unused EQs */ - for (fcp_qidx = adjusted_fcp_eq_count; - fcp_qidx < phba->cfg_fcp_eq_count; - fcp_qidx++) { - lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]); - /* do not delete the first fcp_cq */ - if (fcp_qidx) - lpfc_sli4_queue_free( - phba->sli4_hba.fcp_cq[fcp_qidx]); - } phba->cfg_fcp_eq_count = adjusted_fcp_eq_count; /* Set up SLI-4 HBA */ if (lpfc_sli4_hba_setup(phba)) { @@ -9285,6 +9340,7 @@ lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg) /* Disable interrupt from device */ lpfc_sli4_disable_intr(phba); + lpfc_sli4_queue_destroy(phba); /* Save device state to PCI config space */ pci_save_state(pdev); @@ -9414,6 +9470,7 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba) /* Disable interrupt and pci device */ lpfc_sli4_disable_intr(phba); + lpfc_sli4_queue_destroy(phba); pci_disable_device(phba->pcidev); /* Flush all driver's outstanding SCSI I/Os as we are to reset */ diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h index e3b790e59156..baf53e6c2bd1 100644 --- a/drivers/scsi/lpfc/lpfc_logmsg.h +++ b/drivers/scsi/lpfc/lpfc_logmsg.h @@ -36,6 +36,7 @@ #define LOG_SECURITY 0x00008000 /* Security events */ #define LOG_EVENT 0x00010000 /* CT,TEMP,DUMP, logging */ #define LOG_FIP 0x00020000 /* FIP events */ +#define LOG_FCP_UNDER 0x00040000 /* FCP underruns errors */ #define LOG_ALL_MSG 0xffffffff /* LOG all messages 
*/ #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \ diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c index 83450cc5c4d3..2ebc7d2540c0 100644 --- a/drivers/scsi/lpfc/lpfc_mbox.c +++ b/drivers/scsi/lpfc/lpfc_mbox.c @@ -1598,9 +1598,12 @@ lpfc_mbox_dev_check(struct lpfc_hba *phba) * Timeout value to be used for the given mailbox command **/ int -lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) +lpfc_mbox_tmo_val(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq) { - switch (cmd) { + MAILBOX_t *mbox = &mboxq->u.mb; + uint8_t subsys, opcode; + + switch (mbox->mbxCommand) { case MBX_WRITE_NV: /* 0x03 */ case MBX_UPDATE_CFG: /* 0x1B */ case MBX_DOWN_LOAD: /* 0x1C */ @@ -1610,6 +1613,28 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd) case MBX_LOAD_EXP_ROM: /* 0x9C */ return LPFC_MBOX_TMO_FLASH_CMD; case MBX_SLI4_CONFIG: /* 0x9b */ + subsys = lpfc_sli_config_mbox_subsys_get(phba, mboxq); + opcode = lpfc_sli_config_mbox_opcode_get(phba, mboxq); + if (subsys == LPFC_MBOX_SUBSYSTEM_COMMON) { + switch (opcode) { + case LPFC_MBOX_OPCODE_READ_OBJECT: + case LPFC_MBOX_OPCODE_WRITE_OBJECT: + case LPFC_MBOX_OPCODE_READ_OBJECT_LIST: + case LPFC_MBOX_OPCODE_DELETE_OBJECT: + case LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG: + case LPFC_MBOX_OPCODE_GET_PROFILE_LIST: + case LPFC_MBOX_OPCODE_SET_ACT_PROFILE: + case LPFC_MBOX_OPCODE_SET_PROFILE_CONFIG: + case LPFC_MBOX_OPCODE_GET_FACTORY_PROFILE_CONFIG: + return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; + } + } + if (subsys == LPFC_MBOX_SUBSYSTEM_FCOE) { + switch (opcode) { + case LPFC_MBOX_OPCODE_FCOE_SET_FCLINK_SETTINGS: + return LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO; + } + } return LPFC_MBOX_SLI4_CONFIG_TMO; } return LPFC_MBOX_TMO; @@ -1859,7 +1884,7 @@ lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox, } /* Complete the initialization for the particular Opcode. */ - opcode = lpfc_sli4_mbox_opcode_get(phba, mbox); + opcode = lpfc_sli_config_mbox_opcode_get(phba, mbox); switch (opcode) { case LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT: if (emb == LPFC_SLI4_MBX_EMBED) @@ -1886,23 +1911,56 @@ lpfc_sli4_mbox_rsrc_extent(struct lpfc_hba *phba, struct lpfcMboxq *mbox, } /** - * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command + * lpfc_sli_config_mbox_subsys_get - Get subsystem from a sli_config mbox cmd * @phba: pointer to lpfc hba data structure. - * @mbox: pointer to lpfc mbox command. + * @mbox: pointer to lpfc mbox command queue entry. + * + * This routine gets the subsystem from a SLI4 specific SLI_CONFIG mailbox + * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if the + * sub-header is not present, subsystem LPFC_MBOX_SUBSYSTEM_NA (0x0) shall + * be returned. 
+ **/ +uint8_t +lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) +{ + struct lpfc_mbx_sli4_config *sli4_cfg; + union lpfc_sli4_cfg_shdr *cfg_shdr; + + if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) + return LPFC_MBOX_SUBSYSTEM_NA; + sli4_cfg = &mbox->u.mqe.un.sli4_config; + + /* For embedded mbox command, get opcode from embedded sub-header*/ + if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) { + cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr; + return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request); + } + + /* For non-embedded mbox command, get opcode from first dma page */ + if (unlikely(!mbox->sge_array)) + return LPFC_MBOX_SUBSYSTEM_NA; + cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; + return bf_get(lpfc_mbox_hdr_subsystem, &cfg_shdr->request); +} + +/** + * lpfc_sli_config_mbox_opcode_get - Get opcode from a sli_config mbox cmd + * @phba: pointer to lpfc hba data structure. + * @mbox: pointer to lpfc mbox command queue entry. * - * This routine gets the opcode from a SLI4 specific mailbox command for - * sending IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG - * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be + * This routine gets the opcode from a SLI4 specific SLI_CONFIG mailbox + * command. If the mailbox command is not MBX_SLI4_CONFIG (0x9B) or if + * the sub-header is not present, opcode LPFC_MBOX_OPCODE_NA (0x0) be * returned. **/ uint8_t -lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) +lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox) { struct lpfc_mbx_sli4_config *sli4_cfg; union lpfc_sli4_cfg_shdr *cfg_shdr; if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG) - return 0; + return LPFC_MBOX_OPCODE_NA; sli4_cfg = &mbox->u.mqe.un.sli4_config; /* For embedded mbox command, get opcode from embedded sub-header*/ @@ -1913,7 +1971,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox) /* For non-embedded mbox command, get opcode from first dma page */ if (unlikely(!mbox->sge_array)) - return 0; + return LPFC_MBOX_OPCODE_NA; cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0]; return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request); } diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index eadd241eeff1..5b8790b3cf4b 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -58,6 +58,13 @@ static char *dif_op_str[] = { "SCSI_PROT_READ_PASS", "SCSI_PROT_WRITE_PASS", }; + +struct scsi_dif_tuple { + __be16 guard_tag; /* Checksum */ + __be16 app_tag; /* Opaque storage */ + __be32 ref_tag; /* Target LBA or indirect LBA */ +}; + static void lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb); static void @@ -1263,6 +1270,174 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) return 0; } +static inline unsigned +lpfc_cmd_blksize(struct scsi_cmnd *sc) +{ + return sc->device->sector_size; +} + +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS +/* + * Given a scsi cmnd, determine the BlockGuard tags to be used with it + * @sc: The SCSI command to examine + * @reftag: (out) BlockGuard reference tag for transmitted data + * @apptag: (out) BlockGuard application tag for transmitted data + * @new_guard (in) Value to replace CRC with if needed + * + * Returns (1) if error injection was performed, (0) otherwise + */ +static int +lpfc_bg_err_inject(struct lpfc_hba *phba, struct scsi_cmnd *sc, + uint32_t *reftag, uint16_t *apptag, 
uint32_t new_guard)
+{
+ struct scatterlist *sgpe; /* s/g prot entry */
+ struct scatterlist *sgde; /* s/g data entry */
+ struct scsi_dif_tuple *src;
+ uint32_t op = scsi_get_prot_op(sc);
+ uint32_t blksize;
+ uint32_t numblks;
+ sector_t lba;
+ int rc = 0;
+
+ if (op == SCSI_PROT_NORMAL)
+ return 0;
+
+ lba = scsi_get_lba(sc);
+ if (phba->lpfc_injerr_lba != LPFC_INJERR_LBA_OFF) {
+ blksize = lpfc_cmd_blksize(sc);
+ numblks = (scsi_bufflen(sc) + blksize - 1) / blksize;
+
+ /* Make sure we have the right LBA if one is specified */
+ if ((phba->lpfc_injerr_lba < lba) ||
+ (phba->lpfc_injerr_lba >= (lba + numblks)))
+ return 0;
+ }
+
+ sgpe = scsi_prot_sglist(sc);
+ sgde = scsi_sglist(sc);
+
+ /* Should we change the Reference Tag */
+ if (reftag) {
+ /*
+ * If we are SCSI_PROT_WRITE_STRIP, the protection data is
+ * being stripped from the wire, thus it doesn't matter.
+ */
+ if ((op == SCSI_PROT_WRITE_PASS) ||
+ (op == SCSI_PROT_WRITE_INSERT)) {
+ if (phba->lpfc_injerr_wref_cnt) {
+
+ /* DEADBEEF will be the reftag on the wire */
+ *reftag = 0xDEADBEEF;
+ phba->lpfc_injerr_wref_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9081 BLKGRD: Injecting reftag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ }
+ } else {
+ if (phba->lpfc_injerr_rref_cnt) {
+ *reftag = 0xDEADBEEF;
+ phba->lpfc_injerr_rref_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9076 BLKGRD: Injecting reftag error: "
+ "read lba x%lx\n", (unsigned long)lba);
+ }
+ }
+ }
+
+ /* Should we change the Application Tag */
+ if (apptag) {
+ /*
+ * If we are SCSI_PROT_WRITE_STRIP, the protection data is
+ * being stripped from the wire, thus it doesn't matter.
+ */
+ if ((op == SCSI_PROT_WRITE_PASS) ||
+ (op == SCSI_PROT_WRITE_INSERT)) {
+ if (phba->lpfc_injerr_wapp_cnt) {
+
+ /* DEAD will be the apptag on the wire */
+ *apptag = 0xDEAD;
+ phba->lpfc_injerr_wapp_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9077 BLKGRD: Injecting apptag error: "
+ "write lba x%lx\n", (unsigned long)lba);
+ }
+ } else {
+ if (phba->lpfc_injerr_rapp_cnt) {
+ *apptag = 0xDEAD;
+ phba->lpfc_injerr_rapp_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9078 BLKGRD: Injecting apptag error: "
+ "read lba x%lx\n", (unsigned long)lba);
+ }
+ }
+ }
+
+ /* Should we change the Guard Tag */
+
+ /*
+ * If we are SCSI_PROT_WRITE_INSERT, the protection data on the
+ * wire is generated entirely by the HBA. The host cannot change
+ * it or force an error.
+ */
+ if (((op == SCSI_PROT_WRITE_STRIP) ||
+ (op == SCSI_PROT_WRITE_PASS)) &&
+ phba->lpfc_injerr_wgrd_cnt) {
+ if (sgpe) {
+ src = (struct scsi_dif_tuple *)sg_virt(sgpe);
+ /*
+ * Just inject an error in the first
+ * prot block.
+ */
+ lpfc_printf_log(phba, KERN_ERR, LOG_BG,
+ "9079 BLKGRD: Injecting guard error: "
+ "write lba x%lx oldGuard x%x refTag x%x\n",
+ (unsigned long)lba, src->guard_tag,
+ src->ref_tag);
+
+ src->guard_tag = (uint16_t)new_guard;
+ phba->lpfc_injerr_wgrd_cnt--;
+ phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF;
+ rc = 1;
+
+ } else {
+ blksize = lpfc_cmd_blksize(sc);
+ /*
+ * Jump past the first data block
+ * and inject an error in the
+ * prot data. The prot data is already
+ * embedded after the regular data.
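+ *
+ * Layout assumed by the pointer math below (each protection
+ * group is one blksize-byte data block immediately followed
+ * by its 8-byte struct scsi_dif_tuple):
+ *
+ *   |<- blksize data ->|<- 8B tuple ->|<- blksize data ->| ...
+ *   ^ sg_virt(sgde)    ^ src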
+ */ + src = (struct scsi_dif_tuple *) + (sg_virt(sgde) + blksize); + + lpfc_printf_log(phba, KERN_ERR, LOG_BG, + "9080 BLKGRD: Injecting guard error: " + "write lba x%lx oldGuard x%x refTag x%x\n", + (unsigned long)lba, src->guard_tag, + src->ref_tag); + + src->guard_tag = (uint16_t)new_guard; + phba->lpfc_injerr_wgrd_cnt--; + phba->lpfc_injerr_lba = LPFC_INJERR_LBA_OFF; + rc = 1; + } + } + return rc; +} +#endif + /* * Given a scsi cmnd, determine the BlockGuard opcodes to be used with it * @sc: The SCSI command to examine @@ -1341,18 +1516,6 @@ lpfc_sc_to_bg_opcodes(struct lpfc_hba *phba, struct scsi_cmnd *sc, return ret; } -struct scsi_dif_tuple { - __be16 guard_tag; /* Checksum */ - __be16 app_tag; /* Opaque storage */ - __be32 ref_tag; /* Target LBA or indirect LBA */ -}; - -static inline unsigned -lpfc_cmd_blksize(struct scsi_cmnd *sc) -{ - return sc->device->sector_size; -} - /* * This function sets up buffer list for protection groups of * type LPFC_PG_TYPE_NO_DIF @@ -1401,6 +1564,11 @@ lpfc_bg_setup_bpl(struct lpfc_hba *phba, struct scsi_cmnd *sc, blksize = lpfc_cmd_blksize(sc); reftag = scsi_get_lba(sc) & 0xffffffff; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + /* reftag is the only error we can inject here */ + lpfc_bg_err_inject(phba, sc, &reftag, 0, 0); +#endif + /* setup PDE5 with what we have */ pde5 = (struct lpfc_pde5 *) bpl; memset(pde5, 0, sizeof(struct lpfc_pde5)); @@ -1532,6 +1700,11 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, blksize = lpfc_cmd_blksize(sc); reftag = scsi_get_lba(sc) & 0xffffffff; +#ifdef CONFIG_SCSI_LPFC_DEBUG_FS + /* reftag / guard tag are the only errors we can inject here */ + lpfc_bg_err_inject(phba, sc, &reftag, 0, 0xDEAD); +#endif + split_offset = 0; do { /* setup PDE5 with what we have */ @@ -1671,7 +1844,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, } } while (!alldone); - out: return num_bde; @@ -2075,6 +2247,7 @@ lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) else bf_set(lpfc_sli4_sge_last, sgl, 0); bf_set(lpfc_sli4_sge_offset, sgl, dma_offset); + bf_set(lpfc_sli4_sge_type, sgl, LPFC_SGE_TYPE_DATA); sgl->word2 = cpu_to_le32(sgl->word2); sgl->sge_len = cpu_to_le32(dma_len); dma_offset += dma_len; @@ -2325,8 +2498,9 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, } lp = (uint32_t *)cmnd->sense_buffer; - if (!scsi_status && (resp_info & RESID_UNDER)) - logit = LOG_FCP; + if (!scsi_status && (resp_info & RESID_UNDER) && + vport->cfg_log_verbose & LOG_FCP_UNDER) + logit = LOG_FCP_UNDER; lpfc_printf_vlog(vport, KERN_WARNING, logit, "9024 FCP command x%x failed: x%x SNS x%x x%x " @@ -2342,7 +2516,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, if (resp_info & RESID_UNDER) { scsi_set_resid(cmnd, be32_to_cpu(fcprsp->rspResId)); - lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, + lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP_UNDER, "9025 FCP Read Underrun, expected %d, " "residual %d Data: x%x x%x x%x\n", be32_to_cpu(fcpcmd->fcpDl), @@ -2449,6 +2623,7 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, struct lpfc_fast_path_event *fast_path_evt; struct Scsi_Host *shost; uint32_t queue_depth, scsi_id; + uint32_t logit = LOG_FCP; /* Sanity check on return of outstanding command */ if (!(lpfc_cmd->pCmd)) @@ -2470,16 +2645,22 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn, lpfc_cmd->status = IOSTAT_DRIVER_REJECT; else if (lpfc_cmd->status >= IOSTAT_CNT) 
lpfc_cmd->status = IOSTAT_DEFAULT; - - lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP, - "9030 FCP cmd x%x failed <%d/%d> " - "status: x%x result: x%x Data: x%x x%x\n", - cmd->cmnd[0], - cmd->device ? cmd->device->id : 0xffff, - cmd->device ? cmd->device->lun : 0xffff, - lpfc_cmd->status, lpfc_cmd->result, - pIocbOut->iocb.ulpContext, - lpfc_cmd->cur_iocbq.iocb.ulpIoTag); + if (lpfc_cmd->status == IOSTAT_FCP_RSP_ERROR + && !lpfc_cmd->fcp_rsp->rspStatus3 + && (lpfc_cmd->fcp_rsp->rspStatus2 & RESID_UNDER) + && !(phba->cfg_log_verbose & LOG_FCP_UNDER)) + logit = 0; + else + logit = LOG_FCP | LOG_FCP_UNDER; + lpfc_printf_vlog(vport, KERN_WARNING, logit, + "9030 FCP cmd x%x failed <%d/%d> " + "status: x%x result: x%x Data: x%x x%x\n", + cmd->cmnd[0], + cmd->device ? cmd->device->id : 0xffff, + cmd->device ? cmd->device->lun : 0xffff, + lpfc_cmd->status, lpfc_cmd->result, + pIocbOut->iocb.ulpContext, + lpfc_cmd->cur_iocbq.iocb.ulpIoTag); switch (lpfc_cmd->status) { case IOSTAT_FCP_RSP_ERROR: @@ -3056,8 +3237,9 @@ lpfc_queuecommand_lck(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *)) } ndlp = rdata->pnode; - if (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) && - scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) { + if ((scsi_get_prot_op(cmnd) != SCSI_PROT_NORMAL) && + (!(phba->sli3_options & LPFC_SLI3_BG_ENABLED) || + (phba->sli_rev == LPFC_SLI_REV4))) { lpfc_printf_log(phba, KERN_ERR, LOG_BG, "9058 BLKGRD: ERROR: rcvd protected cmd:%02x" @@ -3691,9 +3873,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd) fc_host_post_vendor_event(shost, fc_get_event_number(), sizeof(scsi_event), (char *)&scsi_event, LPFC_NL_VENDOR_ID); - ret = fc_block_scsi_eh(cmnd); - if (ret) - return ret; + status = fc_block_scsi_eh(cmnd); + if (status) + return status; /* * Since the driver manages a single bus device, reset all diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8b799f047a99..4d4104f38c98 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -379,10 +379,10 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq, dq->host_index = ((dq->host_index + 1) % dq->entry_count); /* Ring The Header Receive Queue Doorbell */ - if (!(hq->host_index % LPFC_RQ_POST_BATCH)) { + if (!(hq->host_index % hq->entry_repost)) { doorbell.word0 = 0; bf_set(lpfc_rq_doorbell_num_posted, &doorbell, - LPFC_RQ_POST_BATCH); + hq->entry_repost); bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id); writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr); } @@ -1864,7 +1864,7 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno) { if (phba->sli_rev == LPFC_SLI_REV4) return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, - lpfc_hbq_defs[qno]->entry_count); + lpfc_hbq_defs[qno]->entry_count); else return lpfc_sli_hbqbuf_fill_hbqs(phba, qno, lpfc_hbq_defs[qno]->init_count); @@ -2200,10 +2200,13 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba) /* Unknown mailbox command compl */ lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "(%d):0323 Unknown Mailbox command " - "x%x (x%x) Cmpl\n", + "x%x (x%x/x%x) Cmpl\n", pmb->vport ? 
pmb->vport->vpi : 0,
pmbox->mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, pmb));
+ lpfc_sli_config_mbox_subsys_get(phba,
+ pmb),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ pmb));
phba->link_state = LPFC_HBA_ERROR;
phba->work_hs = HS_FFER3;
lpfc_handle_eratt(phba);
@@ -2215,17 +2218,19 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
/* Mbox cmd cmpl error - RETRYing */
lpfc_printf_log(phba, KERN_INFO,
- LOG_MBOX | LOG_SLI,
- "(%d):0305 Mbox cmd cmpl "
- "error - RETRYing Data: x%x "
- "(x%x) x%x x%x x%x\n",
- pmb->vport ? pmb->vport->vpi :0,
- pmbox->mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba,
- pmb),
- pmbox->mbxStatus,
- pmbox->un.varWords[0],
- pmb->vport->port_state);
+ LOG_MBOX | LOG_SLI,
+ "(%d):0305 Mbox cmd cmpl "
+ "error - RETRYing Data: x%x "
+ "(x%x/x%x) x%x x%x x%x\n",
+ pmb->vport ? pmb->vport->vpi : 0,
+ pmbox->mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba,
+ pmb),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ pmb),
+ pmbox->mbxStatus,
+ pmbox->un.varWords[0],
+ pmb->vport->port_state);
pmbox->mbxStatus = 0;
pmbox->mbxOwner = OWN_HOST;
rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
@@ -2236,11 +2241,12 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
/* Mailbox cmd <cmd> Cmpl <cmpl> */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
+ "(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl x%p "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
pmb->vport ? pmb->vport->vpi : 0,
pmbox->mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, pmb),
+ lpfc_sli_config_mbox_subsys_get(phba, pmb),
+ lpfc_sli_config_mbox_opcode_get(phba, pmb),
pmb->mbox_cmpl,
*((uint32_t *) pmbox),
pmbox->un.varWords[0],
@@ -4686,6 +4692,175 @@ lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
}
/**
+ * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine retrieves the SLI4 device physical port name that this
+ * PCI function is attached to.
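+ *
+ * In outline (annotation of the code below): READ_CONFIG is tried
+ * first for the link type/number; if its ldv bit is clear, the
+ * driver falls back to COMMON_GET_CNTL_ATTRIBUTES; a version 1
+ * COMMON_GET_PORT_NAME then returns the name byte selected by the
+ * link number.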
+ *
+ * Return codes
+ * 0 - successful
+ * otherwise - failed to retrieve physical port name
+ **/
+static int
+lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *mboxq;
+ struct lpfc_mbx_read_config *rd_config;
+ struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
+ struct lpfc_controller_attribute *cntl_attr;
+ struct lpfc_mbx_get_port_name *get_port_name;
+ void *virtaddr = NULL;
+ uint32_t alloclen, reqlen;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ char cport_name = 0;
+ int rc;
+
+ /* We assume nothing at this point */
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
+ phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
+
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq)
+ return -ENOMEM;
+
+ /* obtain link type and link number via READ_CONFIG */
+ lpfc_read_config(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc == MBX_SUCCESS) {
+ rd_config = &mboxq->u.mqe.un.rd_config;
+ if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3081 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+ goto retrieve_ppname;
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3082 Mailbox (x%x) returned ldv:x0\n",
+ bf_get(lpfc_mqe_command,
+ &mboxq->u.mqe));
+ } else
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3083 Mailbox (x%x) failed, status:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+
+ /* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
+ reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
+ alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
+ LPFC_SLI4_MBX_NEMBED);
+ if (alloclen < reqlen) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3084 Allocated DMA memory size (%d) is "
+ "less than the requested DMA memory size "
+ "(%d)\n", alloclen, reqlen);
+ rc = -ENOMEM;
+ goto out_free_mboxq;
+ }
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ virtaddr = mboxq->sge_array->addr[0];
+ mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
+ shdr = &mbx_cntl_attr->cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+ "3085 Mailbox x%x (x%x/x%x) failed, "
+ "rc:x%x, status:x%x, add_status:x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
+ rc, shdr_status, shdr_add_status);
+ rc = -ENXIO;
+ goto out_free_mboxq;
+ }
+ cntl_attr = &mbx_cntl_attr->cntl_attr;
+ phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
+ phba->sli4_hba.lnk_info.lnk_tp =
+ bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
+ phba->sli4_hba.lnk_info.lnk_no =
+ bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "3086 lnk_type:%d, lnk_numb:%d\n",
+ phba->sli4_hba.lnk_info.lnk_tp,
+ phba->sli4_hba.lnk_info.lnk_no);
+
+retrieve_ppname:
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_GET_PORT_NAME,
+
sizeof(struct lpfc_mbx_get_port_name) - + sizeof(struct lpfc_sli4_cfg_mhdr), + LPFC_SLI4_MBX_EMBED); + get_port_name = &mboxq->u.mqe.un.get_port_name; + shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr; + bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1); + bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request, + phba->sli4_hba.lnk_info.lnk_tp); + rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); + shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response); + shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response); + if (shdr_status || shdr_add_status || rc) { + lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, + "3087 Mailbox x%x (x%x/x%x) failed: " + "rc:x%x, status:x%x, add_status:x%x\n", + bf_get(lpfc_mqe_command, &mboxq->u.mqe), + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), + rc, shdr_status, shdr_add_status); + rc = -ENXIO; + goto out_free_mboxq; + } + switch (phba->sli4_hba.lnk_info.lnk_no) { + case LPFC_LINK_NUMBER_0: + cport_name = bf_get(lpfc_mbx_get_port_name_name0, + &get_port_name->u.response); + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; + break; + case LPFC_LINK_NUMBER_1: + cport_name = bf_get(lpfc_mbx_get_port_name_name1, + &get_port_name->u.response); + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; + break; + case LPFC_LINK_NUMBER_2: + cport_name = bf_get(lpfc_mbx_get_port_name_name2, + &get_port_name->u.response); + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; + break; + case LPFC_LINK_NUMBER_3: + cport_name = bf_get(lpfc_mbx_get_port_name_name3, + &get_port_name->u.response); + phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET; + break; + default: + break; + } + + if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) { + phba->Port[0] = cport_name; + phba->Port[1] = '\0'; + lpfc_printf_log(phba, KERN_INFO, LOG_SLI, + "3091 SLI get port name: %s\n", phba->Port); + } + +out_free_mboxq: + if (rc != MBX_TIMEOUT) { + if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG) + lpfc_sli4_mbox_cmd_free(phba, mboxq); + else + mempool_free(mboxq, phba->mbox_mem_pool); + } + return rc; +} + +/** * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues * @phba: pointer to lpfc hba data structure. 
* @@ -4754,7 +4929,7 @@ lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
if (unlikely(rc)) {
@@ -4911,7 +5086,7 @@ lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t *extnt_cnt,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
@@ -5194,7 +5369,7 @@ lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mbox_tmo);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
if (unlikely(rc)) {
@@ -5619,7 +5794,7 @@ lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
if (!phba->sli4_hba.intr_enable)
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+ mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
@@ -5748,6 +5923,17 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
kfree(vpd);
goto out_free_mbox;
}
+
+ /*
+ * Retrieve the sli4 device physical port name; a failure to do
+ * so is considered non-fatal.
+ */
+ rc = lpfc_sli4_retrieve_pport_name(phba);
+ if (!rc)
+ lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+ "3080 Successfully retrieved SLI4 device "
+ "physical port name: %s.\n", phba->Port);
+
/*
* Evaluate the read rev and vpd data. Populate the driver
* state with the results. If this routine fails, the failure
*
@@ -5818,9 +6004,13 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
* then turn off the global config parameters to disable the
* feature in the driver. This is not a fatal error.
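*
* Note on the hunk below: it first clears LPFC_SLI3_BG_ENABLED and
* sets it again only when the FW acks the DIF feature, so a stale
* BlockGuard bit can no longer survive in sli3_options — the bit
* that lpfc_queuecommand_lck consults when rejecting protected
* commands.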
*/ - if ((phba->cfg_enable_bg) && - !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) - ftr_rsp++; + phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED; + if (phba->cfg_enable_bg) { + if (bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)) + phba->sli3_options |= LPFC_SLI3_BG_ENABLED; + else + ftr_rsp++; + } if (phba->max_vpi && phba->cfg_enable_npiv && !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs))) @@ -5937,12 +6127,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + /* Create all the SLI4 queues */ + rc = lpfc_sli4_queue_create(phba); + if (rc) { + lpfc_printf_log(phba, KERN_ERR, LOG_INIT, + "3089 Failed to allocate queues\n"); + rc = -ENODEV; + goto out_stop_timers; + } /* Set up all the queues to the device */ rc = lpfc_sli4_queue_setup(phba); if (unlikely(rc)) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, "0381 Error %d during queue setup.\n ", rc); - goto out_stop_timers; + goto out_destroy_queue; } /* Arm the CQs and then EQs on device */ @@ -6015,15 +6213,20 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) spin_lock_irq(&phba->hbalock); phba->link_state = LPFC_LINK_DOWN; spin_unlock_irq(&phba->hbalock); - if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) + if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) { rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT); + if (rc) + goto out_unset_queue; + } + mempool_free(mboxq, phba->mbox_mem_pool); + return rc; out_unset_queue: /* Unset all the queues set up in this routine when error out */ - if (rc) - lpfc_sli4_queue_unset(phba); + lpfc_sli4_queue_unset(phba); +out_destroy_queue: + lpfc_sli4_queue_destroy(phba); out_stop_timers: - if (rc) - lpfc_stop_hba_timers(phba); + lpfc_stop_hba_timers(phba); out_free_mbox: mempool_free(mboxq, phba->mbox_mem_pool); return rc; @@ -6318,7 +6521,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, } /* timeout active mbox command */ mod_timer(&psli->mbox_tmo, (jiffies + - (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand)))); + (HZ * lpfc_mbox_tmo_val(phba, pmbox)))); } /* Mailbox cmd <cmd> issue */ @@ -6442,9 +6645,8 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, drvr_flag); goto out_not_finished; } - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, - mb->mbxCommand) * - 1000) + jiffies; + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * + 1000) + jiffies; i = 0; /* Wait for command to complete */ while (((word0 & OWN_CHIP) == OWN_CHIP) || @@ -6555,21 +6757,21 @@ static int lpfc_sli4_async_mbox_block(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; - uint8_t actcmd = MBX_HEARTBEAT; int rc = 0; - unsigned long timeout; + unsigned long timeout = 0; /* Mark the asynchronous mailbox command posting as blocked */ spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; - if (phba->sli.mbox_active) - actcmd = phba->sli.mbox_active->u.mb.mbxCommand; - spin_unlock_irq(&phba->hbalock); /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. 
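*
* Note on the change below: the timeout is now derived from the
* active mailbox entry itself, via
* lpfc_mbox_tmo_val(phba, phba->sli.mbox_active) computed under
* hbalock, and stays 0 when nothing is active, instead of assuming
* an MBX_HEARTBEAT command as before.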
*/
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * 1000) +
- jiffies;
+ if (phba->sli.mbox_active)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
+ phba->sli.mbox_active) *
+ 1000) + jiffies;
+ spin_unlock_irq(&phba->hbalock);
+
/* Wait for the outstanding mailbox command to complete */
while (phba->sli.mbox_active) {
/* Check active mailbox complete status every 2ms */
msleep(2);
@@ -6664,11 +6866,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
spin_unlock_irqrestore(&phba->hbalock, iflag);
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2532 Mailbox command x%x (x%x) "
+ "(%d):2532 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, MBX_POLL);
return MBXERR_ERROR;
}
@@ -6691,7 +6894,7 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
dma_address = &phba->sli4_hba.bmbx.dma_address;
writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
* 1000) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -6707,7 +6910,7 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
/* Post the low mailbox dma address to the port. */
writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
- timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+ timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
* 1000) + jiffies;
do {
bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
@@ -6746,11 +6949,12 @@ lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
lpfc_sli4_swap_str(phba, mboxq);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
+ "(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
" x%x x%x CQ: x%x x%x x%x x%x\n",
- mboxq->vport ? mboxq->vport->vpi : 0,
- mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
bf_get(lpfc_mqe_status, mb),
mb->un.mb_words[0], mb->un.mb_words[1],
mb->un.mb_words[2], mb->un.mb_words[3],
@@ -6796,11 +7000,12 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
rc = lpfc_mbox_dev_check(phba);
if (unlikely(rc)) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2544 Mailbox command x%x (x%x) "
+ "(%d):2544 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
goto out_not_finished;
}
@@ -6814,20 +7019,25 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_WARNING,
LOG_MBOX | LOG_SLI,
"(%d):2541 Mailbox command x%x "
- "(x%x) cannot issue Data: x%x x%x\n",
+ "(x%x/x%x) cannot issue Data: "
+ "x%x x%x\n",
mboxq->vport ?
mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq),
psli->sli_flag, flag);
return rc;
} else if (flag == MBX_POLL) {
lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
"(%d):2542 Try to issue mailbox command "
- "x%x (x%x) synchronously ahead of async"
+ "x%x (x%x/x%x) synchronously ahead of async "
"mailbox command queue: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
/* Try to block the asynchronous mailbox posting */
rc = lpfc_sli4_async_mbox_block(phba);
@@ -6836,16 +7046,18 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
if (rc != MBX_SUCCESS)
lpfc_printf_log(phba, KERN_ERR,
- LOG_MBOX | LOG_SLI,
- "(%d):2597 Mailbox command "
- "x%x (x%x) cannot issue "
- "Data: x%x x%x\n",
- mboxq->vport ?
- mboxq->vport->vpi : 0,
- mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba,
- mboxq),
- psli->sli_flag, flag);
+ LOG_MBOX | LOG_SLI,
+ "(%d):2597 Mailbox command "
+ "x%x (x%x/x%x) cannot issue "
+ "Data: x%x x%x\n",
+ mboxq->vport ?
+ mboxq->vport->vpi : 0,
+ mboxq->u.mb.mbxCommand,
+ lpfc_sli_config_mbox_subsys_get(phba,
+ mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba,
+ mboxq),
+ psli->sli_flag, flag);
/* Unblock the async mailbox posting afterward */
lpfc_sli4_async_mbox_unblock(phba);
}
@@ -6856,11 +7068,12 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
rc = lpfc_mbox_cmd_check(phba, mboxq);
if (rc) {
lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
- "(%d):2543 Mailbox command x%x (x%x) "
+ "(%d):2543 Mailbox command x%x (x%x/x%x) "
"cannot issue Data: x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0,
mboxq->u.mb.mbxCommand,
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
psli->sli_flag, flag);
goto out_not_finished;
}
@@ -6872,10 +7085,11 @@ lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
spin_unlock_irqrestore(&phba->hbalock, iflags);
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
"(%d):0354 Mbox cmd issue - Enqueue Data: "
- "x%x (x%x) x%x x%x x%x\n",
+ "x%x (x%x/x%x) x%x x%x x%x\n",
mboxq->vport ? mboxq->vport->vpi : 0xffffff,
bf_get(lpfc_mqe_command, &mboxq->u.mqe),
- lpfc_sli4_mbox_opcode_get(phba, mboxq),
+ lpfc_sli_config_mbox_subsys_get(phba, mboxq),
+ lpfc_sli_config_mbox_opcode_get(phba, mboxq),
phba->pport->port_state,
psli->sli_flag, MBX_NOWAIT);
/* Wake up worker thread to transport mailbox command from head */
@@ -6952,13 +7166,14 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
/* Start timer for the mbox_tmo and log some mailbox post messages */
mod_timer(&psli->mbox_tmo, (jiffies +
- (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
+ (HZ * lpfc_mbox_tmo_val(phba, mboxq))));
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
- "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
+ "(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
"x%x x%x\n",
mboxq->vport ?
mboxq->vport->vpi : 0, mbx_cmnd, - lpfc_sli4_mbox_opcode_get(phba, mboxq), + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), phba->pport->port_state, psli->sli_flag); if (mbx_cmnd != MBX_HEARTBEAT) { @@ -6982,11 +7197,12 @@ lpfc_sli4_post_async_mbox(struct lpfc_hba *phba) rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe); if (rc != MBX_SUCCESS) { lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI, - "(%d):2533 Mailbox command x%x (x%x) " + "(%d):2533 Mailbox command x%x (x%x/x%x) " "cannot issue Data: x%x x%x\n", mboxq->vport ? mboxq->vport->vpi : 0, mboxq->u.mb.mbxCommand, - lpfc_sli4_mbox_opcode_get(phba, mboxq), + lpfc_sli_config_mbox_subsys_get(phba, mboxq), + lpfc_sli_config_mbox_opcode_get(phba, mboxq), psli->sli_flag, MBX_NOWAIT); goto out_not_finished; } @@ -7322,6 +7538,8 @@ lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, if (inbound == 1) offset = 0; bf_set(lpfc_sli4_sge_offset, sgl, offset); + bf_set(lpfc_sli4_sge_type, sgl, + LPFC_SGE_TYPE_DATA); offset += bde.tus.f.bdeSize; } sgl->word2 = cpu_to_le32(sgl->word2); @@ -9359,7 +9577,6 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq, /* now issue the command */ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT); - if (retval == MBX_BUSY || retval == MBX_SUCCESS) { wait_event_interruptible_timeout(done_q, pmboxq->mbox_flag & LPFC_MBX_WAKE, @@ -9403,23 +9620,24 @@ void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba) { struct lpfc_sli *psli = &phba->sli; - uint8_t actcmd = MBX_HEARTBEAT; unsigned long timeout; + timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies; spin_lock_irq(&phba->hbalock); psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK; spin_unlock_irq(&phba->hbalock); if (psli->sli_flag & LPFC_SLI_ACTIVE) { spin_lock_irq(&phba->hbalock); - if (phba->sli.mbox_active) - actcmd = phba->sli.mbox_active->u.mb.mbxCommand; - spin_unlock_irq(&phba->hbalock); /* Determine how long we might wait for the active mailbox * command to be gracefully completed by firmware. */ - timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) * - 1000) + jiffies; + if (phba->sli.mbox_active) + timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, + phba->sli.mbox_active) * + 1000) + jiffies; + spin_unlock_irq(&phba->hbalock); + while (phba->sli.mbox_active) { /* Check active mailbox complete status every 2ms */ msleep(2); @@ -10415,12 +10633,17 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) /* Move mbox data to caller's mailbox region, do endian swapping */ if (pmb->mbox_cmpl && mbox) lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe)); - /* Set the mailbox status with SLI4 range 0x4000 */ - mcqe_status = bf_get(lpfc_mcqe_status, mcqe); - if (mcqe_status != MB_CQE_STATUS_SUCCESS) - bf_set(lpfc_mqe_status, mqe, - (LPFC_MBX_ERROR_RANGE | mcqe_status)); + /* + * For mcqe errors, conditionally move a modified error code to + * the mbox so that the error will not be missed. 
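+ *
+ * Worked example (LPFC_MBX_ERROR_RANGE is the 0x4000 SLI4 range
+ * noted in the removed comment): an MCQE status of 0x2 against an
+ * MQE still reading MBX_SUCCESS becomes 0x4000 | 0x2 = 0x4002,
+ * while an MQE already carrying a failure code is left untouched.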
+ */ + mcqe_status = bf_get(lpfc_mcqe_status, mcqe); + if (mcqe_status != MB_CQE_STATUS_SUCCESS) { + if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS) + bf_set(lpfc_mqe_status, mqe, + (LPFC_MBX_ERROR_RANGE | mcqe_status)); + } if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) { pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT, @@ -10796,7 +11019,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) case LPFC_MCQ: while ((cqe = lpfc_sli4_cq_get(cq))) { workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) + if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } break; @@ -10808,7 +11031,7 @@ lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe) else workposted |= lpfc_sli4_sp_handle_cqe(phba, cq, cqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) + if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } break; @@ -11040,7 +11263,7 @@ lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe, /* Process all the entries to the CQ */ while ((cqe = lpfc_sli4_cq_get(cq))) { workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) + if (!(++ecount % cq->entry_repost)) lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM); } @@ -11110,6 +11333,8 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id) /* Get to the EQ struct associated with this vector */ speq = phba->sli4_hba.sp_eq; + if (unlikely(!speq)) + return IRQ_NONE; /* Check device state for handling interrupt */ if (unlikely(lpfc_intr_state_check(phba))) { @@ -11127,7 +11352,7 @@ lpfc_sli4_sp_intr_handler(int irq, void *dev_id) */ while ((eqe = lpfc_sli4_eq_get(speq))) { lpfc_sli4_sp_handle_eqe(phba, eqe); - if (!(++ecount % LPFC_GET_QE_REL_INT)) + if (!(++ecount % speq->entry_repost)) lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM); } @@ -11187,6 +11412,8 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id) if (unlikely(!phba)) return IRQ_NONE; + if (unlikely(!phba->sli4_hba.fp_eq)) + return IRQ_NONE; /* Get to the EQ struct associated with this vector */ fpeq = phba->sli4_hba.fp_eq[fcp_eqidx]; @@ -11207,7 +11434,7 @@ lpfc_sli4_fp_intr_handler(int irq, void *dev_id) */ while ((eqe = lpfc_sli4_eq_get(fpeq))) { lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx); - if (!(++ecount % LPFC_GET_QE_REL_INT)) + if (!(++ecount % fpeq->entry_repost)) lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM); } @@ -11359,6 +11586,15 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size, } queue->entry_size = entry_size; queue->entry_count = entry_count; + + /* + * entry_repost is calculated based on the number of entries in the + * queue. This works out except for RQs. If buffers are NOT initially + * posted for every RQE, entry_repost should be adjusted accordingly. + */ + queue->entry_repost = (entry_count >> 3); + if (queue->entry_repost < LPFC_QUEUE_MIN_REPOST) + queue->entry_repost = LPFC_QUEUE_MIN_REPOST; queue->phba = phba; return queue; @@ -11924,6 +12160,31 @@ out: } /** + * lpfc_rq_adjust_repost - Adjust entry_repost for an RQ + * @phba: HBA structure that indicates port to create a queue on. + * @rq: The queue structure to use for the receive queue. + * @qno: The associated HBQ number + * + * + * For SLI4 we need to adjust the RQ repost value based on + * the number of buffers that are initially posted to the RQ. 
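+ *
+ * Example: lpfc_sli4_queue_alloc() starts every queue at
+ * entry_repost = entry_count >> 3, so a 512-entry RQ gets 64;
+ * if its HBQ only posts 128 buffers up front, this helper
+ * recomputes 128 >> 3 = 16, clamped to no less than
+ * LPFC_QUEUE_MIN_REPOST (8).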
+ */ +void +lpfc_rq_adjust_repost(struct lpfc_hba *phba, struct lpfc_queue *rq, int qno) +{ + uint32_t cnt; + + cnt = lpfc_hbq_defs[qno]->entry_count; + + /* Recalc repost for RQs based on buffers initially posted */ + cnt = (cnt >> 3); + if (cnt < LPFC_QUEUE_MIN_REPOST) + cnt = LPFC_QUEUE_MIN_REPOST; + + rq->entry_repost = cnt; +} + +/** * lpfc_rq_create - Create a Receive Queue on the HBA * @phba: HBA structure that indicates port to create a queue on. * @hrq: The queue structure to use to create the header receive queue. @@ -12489,7 +12750,7 @@ lpfc_sli4_post_sgl(struct lpfc_hba *phba, if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } /* The IOCTL status is embedded in the mailbox subheader. */ @@ -12704,7 +12965,7 @@ lpfc_sli4_post_els_sgl_list(struct lpfc_hba *phba) if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; @@ -12867,7 +13128,7 @@ lpfc_sli4_post_els_sgl_list_ext(struct lpfc_hba *phba) if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; @@ -12991,7 +13252,7 @@ lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist, if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; @@ -13147,7 +13408,7 @@ lpfc_sli4_post_scsi_sgl_blk_ext(struct lpfc_hba *phba, struct list_head *sblist, if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr; @@ -13296,7 +13557,8 @@ lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr, uint32_t did = (fc_hdr->fh_d_id[0] << 16 | fc_hdr->fh_d_id[1] << 8 | fc_hdr->fh_d_id[2]); - + if (did == Fabric_DID) + return phba->pport; vports = lpfc_create_vport_work_array(phba); if (vports != NULL) for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) { @@ -14312,7 +14574,7 @@ lpfc_sli4_init_vpi(struct lpfc_vport *vport) if (!mboxq) return -ENOMEM; lpfc_init_vpi(phba, mboxq, vport->vpi); - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI); + mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq); rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo); if (rc != MBX_SUCCESS) { lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI, @@ -15188,7 +15450,7 @@ lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list, if (!phba->sli4_hba.intr_enable) rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL); else { - mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG); + mbox_tmo = lpfc_mbox_tmo_val(phba, mbox); rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo); } /* The IOCTL status is embedded in the mailbox subheader. 
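*
* As in the other hunks of this patch, that subheader status is
* unpacked with a pattern like:
*   shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
*   shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);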
*/ diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index a0075b0af142..29c13b63e323 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h @@ -293,13 +293,11 @@ struct lpfc_sli { struct lpfc_lnk_stat lnk_stat_offsets; }; -#define LPFC_MBOX_TMO 30 /* Sec tmo for outstanding mbox - command */ -#define LPFC_MBOX_SLI4_CONFIG_TMO 60 /* Sec tmo for outstanding mbox - command */ -#define LPFC_MBOX_TMO_FLASH_CMD 300 /* Sec tmo for outstanding FLASH write - * or erase cmds. This is especially - * long because of the potential of - * multiple flash erases that can be - * spawned. - */ +/* Timeout for normal outstanding mbox command (Seconds) */ +#define LPFC_MBOX_TMO 30 +/* Timeout for non-flash-based outstanding sli_config mbox command (Seconds) */ +#define LPFC_MBOX_SLI4_CONFIG_TMO 60 +/* Timeout for flash-based outstanding sli_config mbox command (Seconds) */ +#define LPFC_MBOX_SLI4_CONFIG_EXTENDED_TMO 300 +/* Timeout for other flash-based outstanding mbox command (Seconds) */ +#define LPFC_MBOX_TMO_FLASH_CMD 300 diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 19bb87ae8597..d5cffd8af340 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h @@ -23,7 +23,6 @@ #define LPFC_XRI_EXCH_BUSY_WAIT_T1 10 #define LPFC_XRI_EXCH_BUSY_WAIT_T2 30000 #define LPFC_RELEASE_NOTIFICATION_INTERVAL 32 -#define LPFC_GET_QE_REL_INT 32 #define LPFC_RPI_LOW_WATER_MARK 10 #define LPFC_UNREG_FCF 1 @@ -126,6 +125,8 @@ struct lpfc_queue { struct list_head child_list; uint32_t entry_count; /* Number of entries to support on the queue */ uint32_t entry_size; /* Size of each queue entry. */ + uint32_t entry_repost; /* Count of entries before doorbell is rung */ +#define LPFC_QUEUE_MIN_REPOST 8 uint32_t queue_id; /* Queue ID assigned by the hardware */ uint32_t assoc_qid; /* Queue ID associated with, for CQ/WQ/MQ */ struct list_head page_list; @@ -388,6 +389,16 @@ struct lpfc_iov { uint32_t vf_number; }; +struct lpfc_sli4_lnk_info { + uint8_t lnk_dv; +#define LPFC_LNK_DAT_INVAL 0 +#define LPFC_LNK_DAT_VAL 1 + uint8_t lnk_tp; +#define LPFC_LNK_GE 0x0 /* FCoE */ +#define LPFC_LNK_FC 0x1 /* FC */ + uint8_t lnk_no; +}; + /* SLI4 HBA data structure entries */ struct lpfc_sli4_hba { void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for @@ -503,6 +514,10 @@ struct lpfc_sli4_hba { struct list_head sp_els_xri_aborted_work_queue; struct list_head sp_unsol_work_queue; struct lpfc_sli4_link link_state; + struct lpfc_sli4_lnk_info lnk_info; + uint32_t pport_name_sta; +#define LPFC_SLI4_PPNAME_NON 0 +#define LPFC_SLI4_PPNAME_GET 1 struct lpfc_iov iov; spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */ spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */ @@ -553,6 +568,7 @@ struct lpfc_rsrc_blks { * SLI4 specific function prototypes */ int lpfc_pci_function_reset(struct lpfc_hba *); +int lpfc_sli4_pdev_status_reg_wait(struct lpfc_hba *); int lpfc_sli4_hba_setup(struct lpfc_hba *); int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t, uint8_t, uint32_t, bool); @@ -576,6 +592,7 @@ uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *, struct lpfc_queue *, struct lpfc_queue *, uint32_t); +void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int); uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *); uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue 
*);
uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
@@ -632,5 +649,5 @@ void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
int lpfc_sli4_post_status_check(struct lpfc_hba *);
-uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
-
+uint8_t lpfc_sli_config_mbox_subsys_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
+uint8_t lpfc_sli_config_mbox_opcode_get(struct lpfc_hba *, LPFC_MBOXQ_t *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index c1e0ae94d9f4..b0630e37f1ef 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.25"
+#define LPFC_DRIVER_VERSION "8.3.27"
#define LPFC_DRIVER_NAME "lpfc"
#define LPFC_SP_DRIVER_HANDLER_NAME "lpfc:sp"
#define LPFC_FP_DRIVER_HANDLER_NAME "lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index 1feb551a57bc..cff6ca67415c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -692,13 +692,14 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
/* Indicate free memory when release */
NLP_SET_FREE_REQ(ndlp);
} else {
- if (!NLP_CHK_NODE_ACT(ndlp))
+ if (!NLP_CHK_NODE_ACT(ndlp)) {
ndlp = lpfc_enable_node(vport, ndlp,
NLP_STE_UNUSED_NODE);
if (!ndlp)
goto skip_logo;
+ }
- /* Remove ndlp from vport npld list */
+ /* Remove ndlp from vport list */
lpfc_dequeue_node(vport, ndlp);
spin_lock_irq(&phba->ndlp_lock);
if (!NLP_CHK_FREE_REQ(ndlp))
@@ -711,8 +712,17 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
}
spin_unlock_irq(&phba->ndlp_lock);
}
- if (!(vport->vpi_state & LPFC_VPI_REGISTERED))
+
+ /*
+ * If the vpi is not registered, then a valid FDISC doesn't
+ * exist and there is no need for an ELS LOGO. Just clean up
+ * the ndlp.
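+ *
+ * The lpfc_nlp_put() below releases the node reference here,
+ * since the LOGO completion path that would normally release
+ * it is being skipped.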
+ */ + if (!(vport->vpi_state & LPFC_VPI_REGISTERED)) { + lpfc_nlp_put(ndlp); goto skip_logo; + } + vport->unreg_vpi_cmpl = VPORT_INVAL; timeout = msecs_to_jiffies(phba->fc_ratov * 2000); if (!lpfc_issue_els_npiv_logo(vport, ndlp)) diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c index 3893337e3dd3..590ce1ef2016 100644 --- a/drivers/scsi/mac_esp.c +++ b/drivers/scsi/mac_esp.c @@ -230,9 +230,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count, u32 dma_count, int write, u8 cmd) { struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); - unsigned long flags; - - local_irq_save(flags); mep->error = 0; @@ -270,8 +267,6 @@ static void mac_esp_send_pdma_cmd(struct esp *esp, u32 addr, u32 esp_count, esp_count = n; } } while (esp_count); - - local_irq_restore(flags); } /* @@ -353,8 +348,6 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, struct mac_esp_priv *mep = MAC_ESP_GET_PRIV(esp); u8 *fifo = esp->regs + ESP_FDATA * 16; - disable_irq(esp->host->irq); - cmd &= ~ESP_CMD_DMA; mep->error = 0; @@ -431,8 +424,6 @@ static void mac_esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count, scsi_esp_cmd(esp, ESP_CMD_TI); } } - - enable_irq(esp->host->irq); } static int mac_esp_irq_pending(struct esp *esp) diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 3948a00d81f4..dd94c7d574fb 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h @@ -33,9 +33,9 @@ /* * MegaRAID SAS Driver meta data */ -#define MEGASAS_VERSION "00.00.05.40-rc1" -#define MEGASAS_RELDATE "Jul. 26, 2011" -#define MEGASAS_EXT_VERSION "Tue. Jul. 26 17:00:00 PDT 2011" +#define MEGASAS_VERSION "00.00.06.12-rc1" +#define MEGASAS_RELDATE "Oct. 5, 2011" +#define MEGASAS_EXT_VERSION "Wed. Oct. 5 17:00:00 PDT 2011" /* * Device IDs @@ -48,6 +48,7 @@ #define PCI_DEVICE_ID_LSI_SAS0073SKINNY 0x0073 #define PCI_DEVICE_ID_LSI_SAS0071SKINNY 0x0071 #define PCI_DEVICE_ID_LSI_FUSION 0x005b +#define PCI_DEVICE_ID_LSI_INVADER 0x005d /* * ===================================== @@ -138,6 +139,7 @@ #define MFI_CMD_ABORT 0x06 #define MFI_CMD_SMP 0x07 #define MFI_CMD_STP 0x08 +#define MFI_CMD_INVALID 0xff #define MR_DCMD_CTRL_GET_INFO 0x01010000 #define MR_DCMD_LD_GET_LIST 0x03010000 @@ -221,6 +223,7 @@ enum MFI_STAT { MFI_STAT_RESERVATION_IN_PROGRESS = 0x36, MFI_STAT_I2C_ERRORS_DETECTED = 0x37, MFI_STAT_PCI_ERRORS_DETECTED = 0x38, + MFI_STAT_CONFIG_SEQ_MISMATCH = 0x67, MFI_STAT_INVALID_STATUS = 0xFF }; @@ -716,7 +719,7 @@ struct megasas_ctrl_info { #define MEGASAS_DEFAULT_INIT_ID -1 #define MEGASAS_MAX_LUN 8 #define MEGASAS_MAX_LD 64 -#define MEGASAS_DEFAULT_CMD_PER_LUN 128 +#define MEGASAS_DEFAULT_CMD_PER_LUN 256 #define MEGASAS_MAX_PD (MEGASAS_MAX_PD_CHANNELS * \ MEGASAS_MAX_DEV_PER_CHANNEL) #define MEGASAS_MAX_LD_IDS (MEGASAS_MAX_LD_CHANNELS * \ @@ -755,6 +758,7 @@ struct megasas_ctrl_info { #define MEGASAS_INT_CMDS 32 #define MEGASAS_SKINNY_INT_CMDS 5 +#define MEGASAS_MAX_MSIX_QUEUES 16 /* * FW can accept both 32 and 64 bit SGLs. 
We want to allocate 32/64 bit * SGLs based on the size of dma_addr_t @@ -1276,6 +1280,11 @@ struct megasas_aen_event { struct megasas_instance *instance; }; +struct megasas_irq_context { + struct megasas_instance *instance; + u32 MSIxIndex; +}; + struct megasas_instance { u32 *producer; @@ -1349,8 +1358,9 @@ struct megasas_instance { /* Ptr to hba specific information */ void *ctrl_context; - u8 msi_flag; - struct msix_entry msixentry; + unsigned int msix_vectors; + struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES]; + struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES]; u64 map_id; struct megasas_cmd *map_update_cmd; unsigned long bar; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 776d01988660..29a994f9c4f1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -18,7 +18,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * FILE: megaraid_sas_base.c - * Version : v00.00.05.40-rc1 + * Version : v00.00.06.12-rc1 * * Authors: LSI Corporation * Sreenivas Bagalkote @@ -84,7 +84,7 @@ MODULE_VERSION(MEGASAS_VERSION); MODULE_AUTHOR("megaraidlinux@lsi.com"); MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); -int megasas_transition_to_ready(struct megasas_instance *instance); +int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); static int megasas_get_pd_list(struct megasas_instance *instance); static int megasas_issue_init_mfi(struct megasas_instance *instance); static int megasas_register_aen(struct megasas_instance *instance, @@ -114,6 +114,8 @@ static struct pci_device_id megasas_pci_table[] = { /* xscale IOP */ {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FUSION)}, /* Fusion */ + {PCI_DEVICE(PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_INVADER)}, + /* Invader */ {} }; @@ -213,6 +215,10 @@ megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) cmd->scmd = NULL; cmd->frame_count = 0; + if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && + (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && + (reset_devices)) + cmd->frame->hdr.cmd = MFI_CMD_INVALID; list_add_tail(&cmd->list, &instance->cmd_pool); spin_unlock_irqrestore(&instance->cmd_pool_lock, flags); @@ -1583,7 +1589,8 @@ void megaraid_sas_kill_hba(struct megasas_instance *instance) { if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) || (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || - (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION)) { + (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) { writel(MFI_STOP_ADP, &instance->reg_set->doorbell); } else { writel(MFI_STOP_ADP, &instance->reg_set->inbound_doorbell); @@ -1907,7 +1914,6 @@ static int megasas_generic_reset(struct scsi_cmnd *scmd) static enum blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) { - struct megasas_cmd *cmd = (struct megasas_cmd *)scmd->SCp.ptr; struct megasas_instance *instance; unsigned long flags; @@ -1916,7 +1922,7 @@ blk_eh_timer_return megasas_reset_timer(struct scsi_cmnd *scmd) return BLK_EH_NOT_HANDLED; } - instance = cmd->instance; + instance = (struct megasas_instance *)scmd->device->host->hostdata; if (!(instance->flag & MEGASAS_FW_BUSY)) { /* FW is busy, throttle IO */ spin_lock_irqsave(instance->host->host_lock, flags); @@ -1957,7 +1963,8 @@ static int megasas_reset_bus_host(struct scsi_cmnd *scmd) /* * First wait for all commands to complete 
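*
* (The reset itself is then dispatched below: Fusion and the new
* Invader parts take megasas_reset_fusion(), everything else falls
* back to megasas_generic_reset().)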
*/ - if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) ret = megasas_reset_fusion(scmd->device->host); else ret = megasas_generic_reset(scmd); @@ -2161,7 +2168,16 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, cmd->scmd->SCp.ptr = NULL; switch (hdr->cmd) { - + case MFI_CMD_INVALID: + /* Some older 1068 controller FW may keep a pended + MR_DCMD_CTRL_EVENT_GET_INFO left over from the main kernel + when booting the kdump kernel. Ignore this command to + prevent a kernel panic on shutdown of the kdump kernel. */ + printk(KERN_WARNING "megaraid_sas: MFI_CMD_INVALID command " + "completed.\n"); + printk(KERN_WARNING "megaraid_sas: If you have a controller " + "other than PERC5, please upgrade your firmware.\n"); + break; case MFI_CMD_PD_SCSI_IO: case MFI_CMD_LD_SCSI_IO: @@ -2477,7 +2493,7 @@ process_fw_state_change_wq(struct work_struct *work) msleep(1000); } - if (megasas_transition_to_ready(instance)) { + if (megasas_transition_to_ready(instance, 1)) { printk(KERN_NOTICE "megaraid_sas:adapter not ready\n"); megaraid_sas_kill_hba(instance); @@ -2532,7 +2548,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, instance->reg_set) ) == 0) { /* Hardware may not set outbound_intr_status in MSI-X mode */ - if (!instance->msi_flag) + if (!instance->msix_vectors) return IRQ_NONE; } @@ -2590,16 +2606,14 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, */ static irqreturn_t megasas_isr(int irq, void *devp) { - struct megasas_instance *instance; + struct megasas_irq_context *irq_context = devp; + struct megasas_instance *instance = irq_context->instance; unsigned long flags; irqreturn_t rc; - if (atomic_read( - &(((struct megasas_instance *)devp)->fw_reset_no_pci_access))) + if (atomic_read(&instance->fw_reset_no_pci_access)) return IRQ_HANDLED; - instance = (struct megasas_instance *)devp; - spin_lock_irqsave(&instance->hba_lock, flags); rc = megasas_deplete_reply_queue(instance, DID_OK); spin_unlock_irqrestore(&instance->hba_lock, flags); @@ -2617,7 +2631,7 @@ static irqreturn_t megasas_isr(int irq, void *devp) * has to wait for the ready state. 
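*
* With the new @ocr argument: during an online controller reset
* (ocr = 1) a FAULT state is still waited out as before, but on
* the probe path (ocr = 0) FAULT now fails fast with -ENODEV, as
* the hunk below shows.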
*/ int -megasas_transition_to_ready(struct megasas_instance* instance) +megasas_transition_to_ready(struct megasas_instance *instance, int ocr) { int i; u8 max_wait; @@ -2639,11 +2653,13 @@ megasas_transition_to_ready(struct megasas_instance* instance) switch (fw_state) { case MFI_STATE_FAULT: - printk(KERN_DEBUG "megasas: FW in FAULT state!!\n"); - max_wait = MEGASAS_RESET_WAIT_TIME; - cur_state = MFI_STATE_FAULT; - break; + if (ocr) { + max_wait = MEGASAS_RESET_WAIT_TIME; + cur_state = MFI_STATE_FAULT; + break; + } else + return -ENODEV; case MFI_STATE_WAIT_HANDSHAKE: /* @@ -2654,7 +2670,9 @@ megasas_transition_to_ready(struct megasas_instance* instance) (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->pdev->device == - PCI_DEVICE_ID_LSI_FUSION)) { + PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER)) { writel( MFI_INIT_CLEAR_HANDSHAKE|MFI_INIT_HOTPLUG, &instance->reg_set->doorbell); @@ -2674,7 +2692,9 @@ megasas_transition_to_ready(struct megasas_instance* instance) (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->pdev->device == - PCI_DEVICE_ID_LSI_FUSION)) { + PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER)) { writel(MFI_INIT_HOTPLUG, &instance->reg_set->doorbell); } else @@ -2695,11 +2715,15 @@ megasas_transition_to_ready(struct megasas_instance* instance) (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY) || (instance->pdev->device - == PCI_DEVICE_ID_LSI_FUSION)) { + == PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device + == PCI_DEVICE_ID_LSI_INVADER)) { writel(MFI_RESET_FLAGS, &instance->reg_set->doorbell); - if (instance->pdev->device == - PCI_DEVICE_ID_LSI_FUSION) { + if ((instance->pdev->device == + PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER)) { for (i = 0; i < (10 * 1000); i += 20) { if (readl( &instance-> @@ -2922,6 +2946,10 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) memset(cmd->frame, 0, total_sz); cmd->frame->io.context = cmd->index; cmd->frame->io.pad_0 = 0; + if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && + (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && + (reset_devices)) + cmd->frame->hdr.cmd = MFI_CMD_INVALID; } return 0; @@ -3474,6 +3502,7 @@ static int megasas_init_fw(struct megasas_instance *instance) struct megasas_register_set __iomem *reg_set; struct megasas_ctrl_info *ctrl_info; unsigned long bar_list; + int i; /* Find first memory bar */ bar_list = pci_select_bars(instance->pdev, IORESOURCE_MEM); @@ -3496,6 +3525,7 @@ static int megasas_init_fw(struct megasas_instance *instance) switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_INVADER: instance->instancet = &megasas_instance_template_fusion; break; case PCI_DEVICE_ID_LSI_SAS1078R: @@ -3520,15 +3550,39 @@ static int megasas_init_fw(struct megasas_instance *instance) /* * We expect the FW state to be READY */ - if (megasas_transition_to_ready(instance)) + if (megasas_transition_to_ready(instance, 0)) goto fail_ready_state; /* Check if MSI-X is supported while in ready state */ msix_enable = (instance->instancet->read_fw_status_reg(reg_set) & 0x4000000) >> 0x1a; - if (msix_enable && !msix_disable && - !pci_enable_msix(instance->pdev, &instance->msixentry, 1)) - instance->msi_flag = 1; + if (msix_enable && !msix_disable) { + /* Check max MSI-X vectors */ + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == 
PCI_DEVICE_ID_LSI_INVADER)) { + instance->msix_vectors = (readl(&instance->reg_set-> + outbound_scratch_pad_2 + ) & 0x1F) + 1; + } else + instance->msix_vectors = 1; + /* Don't bother allocating more MSI-X vectors than cpus */ + instance->msix_vectors = min(instance->msix_vectors, + (unsigned int)num_online_cpus()); + for (i = 0; i < instance->msix_vectors; i++) + instance->msixentry[i].entry = i; + i = pci_enable_msix(instance->pdev, instance->msixentry, + instance->msix_vectors); + if (i >= 0) { + if (i) { + if (!pci_enable_msix(instance->pdev, + instance->msixentry, i)) + instance->msix_vectors = i; + else + instance->msix_vectors = 0; + } + } else + instance->msix_vectors = 0; + } /* Get operational params, sge flags, send init cmd to controller */ if (instance->instancet->init_adapter(instance)) @@ -3892,7 +3946,8 @@ static int megasas_io_attach(struct megasas_instance *instance) host->max_cmd_len = 16; /* Fusion only supports host reset */ - if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) { + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) { host->hostt->eh_device_reset_handler = NULL; host->hostt->eh_bus_reset_handler = NULL; } @@ -3942,7 +3997,7 @@ fail_set_dma_mask: static int __devinit megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) { - int rval, pos; + int rval, pos, i, j; struct Scsi_Host *host; struct megasas_instance *instance; u16 control = 0; @@ -4002,6 +4057,7 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_INVADER: { struct fusion_context *fusion; @@ -4094,7 +4150,8 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) instance->last_time = 0; instance->disableOnlineCtrlReset = 1; - if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq); else INIT_WORK(&instance->work_init, process_fw_state_change_wq); @@ -4108,11 +4165,32 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) /* * Register IRQ */ - if (request_irq(instance->msi_flag ? 
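The retry sequence above follows the contract of the legacy pci_enable_msix() of this era: 0 means every requested vector was granted, a positive return is the number of vectors actually available (so the call is retried with that count), and a negative return rules MSI-X out. Condensed into a sketch, assuming entries[] has already been filled with indices 0..want-1:

	/* Returns how many MSI-X vectors were enabled; 0 means fall back to INTx. */
	static int sketch_enable_msix(struct pci_dev *pdev,
				      struct msix_entry *entries, int want)
	{
		int err = pci_enable_msix(pdev, entries, want);

		if (err == 0)
			return want;		/* got everything we asked for */
		if (err > 0 && pci_enable_msix(pdev, entries, err) == 0)
			return err;		/* settled for what was available */
		return 0;			/* MSI-X unavailable */
	}
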
instance->msixentry.vector : - pdev->irq, instance->instancet->service_isr, - IRQF_SHARED, "megasas", instance)) { - printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); - goto fail_irq; + if (instance->msix_vectors) { + for (i = 0 ; i < instance->msix_vectors; i++) { + instance->irq_context[i].instance = instance; + instance->irq_context[i].MSIxIndex = i; + if (request_irq(instance->msixentry[i].vector, + instance->instancet->service_isr, 0, + "megasas", + &instance->irq_context[i])) { + printk(KERN_DEBUG "megasas: Failed to " + "register IRQ for vector %d.\n", i); + for (j = 0 ; j < i ; j++) + free_irq( + instance->msixentry[j].vector, + &instance->irq_context[j]); + goto fail_irq; + } + } + } else { + instance->irq_context[0].instance = instance; + instance->irq_context[0].MSIxIndex = 0; + if (request_irq(pdev->irq, instance->instancet->service_isr, + IRQF_SHARED, "megasas", + &instance->irq_context[0])) { + printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); + goto fail_irq; + } } instance->instancet->enable_intr(instance->reg_set); @@ -4156,15 +4234,20 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, NULL); instance->instancet->disable_intr(instance->reg_set); - free_irq(instance->msi_flag ? instance->msixentry.vector : - instance->pdev->irq, instance); + if (instance->msix_vectors) + for (i = 0 ; i < instance->msix_vectors; i++) + free_irq(instance->msixentry[i].vector, + &instance->irq_context[i]); + else + free_irq(instance->pdev->irq, &instance->irq_context[0]); fail_irq: - if (instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) + if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) || + (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER)) megasas_release_fusion(instance); else megasas_release_mfi(instance); fail_init_mfi: - if (instance->msi_flag) + if (instance->msix_vectors) pci_disable_msix(instance->pdev); fail_alloc_dma_buf: if (instance->evt_detail) @@ -4280,6 +4363,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) { struct Scsi_Host *host; struct megasas_instance *instance; + int i; instance = pci_get_drvdata(pdev); host = instance->host; @@ -4303,9 +4387,14 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) pci_set_drvdata(instance->pdev, instance); instance->instancet->disable_intr(instance->reg_set); - free_irq(instance->msi_flag ? 
instance->msixentry.vector : - instance->pdev->irq, instance); - if (instance->msi_flag) + + if (instance->msix_vectors) + for (i = 0 ; i < instance->msix_vectors; i++) + free_irq(instance->msixentry[i].vector, + &instance->irq_context[i]); + else + free_irq(instance->pdev->irq, &instance->irq_context[0]); + if (instance->msix_vectors) pci_disable_msix(instance->pdev); pci_save_state(pdev); @@ -4323,7 +4412,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state) static int megasas_resume(struct pci_dev *pdev) { - int rval; + int rval, i, j; struct Scsi_Host *host; struct megasas_instance *instance; @@ -4357,15 +4446,17 @@ megasas_resume(struct pci_dev *pdev) /* * We expect the FW state to be READY */ - if (megasas_transition_to_ready(instance)) + if (megasas_transition_to_ready(instance, 0)) goto fail_ready_state; /* Now re-enable MSI-X */ - if (instance->msi_flag) - pci_enable_msix(instance->pdev, &instance->msixentry, 1); + if (instance->msix_vectors) + pci_enable_msix(instance->pdev, instance->msixentry, + instance->msix_vectors); switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_INVADER: { megasas_reset_reply_desc(instance); if (megasas_ioc_init_fusion(instance)) { @@ -4391,11 +4482,32 @@ megasas_resume(struct pci_dev *pdev) /* * Register IRQ */ - if (request_irq(instance->msi_flag ? instance->msixentry.vector : - pdev->irq, instance->instancet->service_isr, - IRQF_SHARED, "megasas", instance)) { - printk(KERN_ERR "megasas: Failed to register IRQ\n"); - goto fail_irq; + if (instance->msix_vectors) { + for (i = 0 ; i < instance->msix_vectors; i++) { + instance->irq_context[i].instance = instance; + instance->irq_context[i].MSIxIndex = i; + if (request_irq(instance->msixentry[i].vector, + instance->instancet->service_isr, 0, + "megasas", + &instance->irq_context[i])) { + printk(KERN_DEBUG "megasas: Failed to " + "register IRQ for vector %d.\n", i); + for (j = 0 ; j < i ; j++) + free_irq( + instance->msixentry[j].vector, + &instance->irq_context[j]); + goto fail_irq; + } + } + } else { + instance->irq_context[0].instance = instance; + instance->irq_context[0].MSIxIndex = 0; + if (request_irq(pdev->irq, instance->instancet->service_isr, + IRQF_SHARED, "megasas", + &instance->irq_context[0])) { + printk(KERN_DEBUG "megasas: Failed to register IRQ\n"); + goto fail_irq; + } } instance->instancet->enable_intr(instance->reg_set); @@ -4492,13 +4604,18 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) instance->instancet->disable_intr(instance->reg_set); - free_irq(instance->msi_flag ? instance->msixentry.vector : - instance->pdev->irq, instance); - if (instance->msi_flag) + if (instance->msix_vectors) + for (i = 0 ; i < instance->msix_vectors; i++) + free_irq(instance->msixentry[i].vector, + &instance->irq_context[i]); + else + free_irq(instance->pdev->irq, &instance->irq_context[0]); + if (instance->msix_vectors) pci_disable_msix(instance->pdev); switch (instance->pdev->device) { case PCI_DEVICE_ID_LSI_FUSION: + case PCI_DEVICE_ID_LSI_INVADER: megasas_release_fusion(instance); for (i = 0; i < 2 ; i++) if (fusion->ld_map[i]) @@ -4539,14 +4656,20 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev) */ static void megasas_shutdown(struct pci_dev *pdev) { + int i; struct megasas_instance *instance = pci_get_drvdata(pdev); + instance->unload = 1; megasas_flush_cache(instance); megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN); instance->instancet->disable_intr(instance->reg_set); - free_irq(instance->msi_flag ? 
instance->msixentry.vector : - instance->pdev->irq, instance); - if (instance->msi_flag) + if (instance->msix_vectors) + for (i = 0 ; i < instance->msix_vectors; i++) + free_irq(instance->msixentry[i].vector, + &instance->irq_context[i]); + else + free_irq(instance->pdev->irq, &instance->irq_context[0]); + if (instance->msix_vectors) pci_disable_msix(instance->pdev); } diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 5a5af1fe7581..5255dd688aca 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c @@ -52,6 +52,7 @@ #include <scsi/scsi_host.h> #include "megaraid_sas_fusion.h" +#include "megaraid_sas.h" #include <asm/div64.h> #define ABS_DIFF(a, b) (((a) > (b)) ? ((a) - (b)) : ((b) - (a))) @@ -226,8 +227,9 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, * span - Span number * block - Absolute Block number in the physical disk */ -u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock, - u16 *pDevHandle, struct RAID_CONTEXT *pRAID_Context, +u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, + u16 stripRef, u64 *pdBlock, u16 *pDevHandle, + struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map) { struct MR_LD_RAID *raid = MR_LdRaidGet(ld, map); @@ -279,7 +281,8 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock, *pDevHandle = MR_PdDevHandleGet(pd, map); else { *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */ - if (raid->level >= 5) + if ((raid->level >= 5) && + (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER)) pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE; else if (raid->level == 1) { /* Get alternate Pd. */ @@ -306,7 +309,8 @@ u8 MR_GetPhyParams(u32 ld, u64 stripRow, u16 stripRef, u64 *pdBlock, * This function will return 0 if region lock was acquired OR return num strips */ u8 -MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, +MR_BuildRaidContext(struct megasas_instance *instance, + struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map) { @@ -394,8 +398,12 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, } pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; - pRAID_Context->regLockFlags = (isRead) ? REGION_TYPE_SHARED_READ : - raid->regTypeReqOnWrite; + if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) + pRAID_Context->regLockFlags = (isRead) ? + raid->regTypeReqOnRead : raid->regTypeReqOnWrite; + else + pRAID_Context->regLockFlags = (isRead) ? 
+ REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; pRAID_Context->VirtualDiskTgtId = raid->targetId; pRAID_Context->regLockRowLBA = regStart; pRAID_Context->regLockLength = regSize; @@ -404,7 +412,8 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, /*Get Phy Params only if FP capable, or else leave it to MR firmware to do the calculation.*/ if (io_info->fpOkForIo) { - retval = MR_GetPhyParams(ld, start_strip, ref_in_start_stripe, + retval = MR_GetPhyParams(instance, ld, start_strip, + ref_in_start_stripe, &io_info->pdBlock, &io_info->devHandle, pRAID_Context, map); @@ -415,7 +424,8 @@ MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, } else if (isRead) { uint stripIdx; for (stripIdx = 0; stripIdx < num_strips; stripIdx++) { - if (!MR_GetPhyParams(ld, start_strip + stripIdx, + if (!MR_GetPhyParams(instance, ld, + start_strip + stripIdx, ref_in_start_stripe, &io_info->pdBlock, &io_info->devHandle, diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index f13e7abd345a..bfd87fab39aa 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -74,7 +74,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd); u8 -MR_BuildRaidContext(struct IO_REQUEST_INFO *io_info, +MR_BuildRaidContext(struct megasas_instance *instance, + struct IO_REQUEST_INFO *io_info, struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map); u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); @@ -89,7 +90,7 @@ u8 MR_ValidateMapInfo(struct MR_FW_RAID_MAP_ALL *map, struct LD_LOAD_BALANCE_INFO *lbInfo); u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info); -int megasas_transition_to_ready(struct megasas_instance *instance); +int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); void megaraid_sas_kill_hba(struct megasas_instance *instance); extern u32 megasas_dbg_lvl; @@ -101,6 +102,10 @@ extern u32 megasas_dbg_lvl; void megasas_enable_intr_fusion(struct megasas_register_set __iomem *regs) { + /* For Thunderbolt/Invader also clear intr on enable */ + writel(~0, ®s->outbound_intr_status); + readl(®s->outbound_intr_status); + writel(~MFI_FUSION_ENABLE_INTERRUPT_MASK, &(regs)->outbound_intr_mask); /* Dummy readl to force pci flush */ @@ -139,11 +144,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs) if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) return 0; - /* - * dummy read to flush PCI - */ - readl(®s->outbound_intr_status); - return 1; } @@ -385,7 +385,7 @@ static int megasas_create_frame_pool_fusion(struct megasas_instance *instance) int megasas_alloc_cmds_fusion(struct megasas_instance *instance) { - int i, j; + int i, j, count; u32 max_cmd, io_frames_sz; struct fusion_context *fusion; struct megasas_cmd_fusion *cmd; @@ -409,9 +409,10 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) goto fail_req_desc; } + count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; fusion->reply_frames_desc_pool = pci_pool_create("reply_frames pool", instance->pdev, - fusion->reply_alloc_sz, 16, 0); + fusion->reply_alloc_sz * count, 16, 0); if (!fusion->reply_frames_desc_pool) { printk(KERN_ERR "megasas; Could not allocate memory for " @@ -430,7 +431,7 @@ megasas_alloc_cmds_fusion(struct megasas_instance *instance) } reply_desc = fusion->reply_frames_desc; - for (i = 0; i < fusion->reply_q_depth; i++, reply_desc++) + for (i = 0; i < fusion->reply_q_depth * count; i++, reply_desc++) reply_desc->Words = ULLONG_MAX; io_frames_sz = fusion->io_frames_alloc_sz; @@ -590,7 +591,6 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) struct megasas_init_frame *init_frame; struct MPI2_IOC_INIT_REQUEST *IOCInitMessage; dma_addr_t ioc_init_handle; - u32 context; struct megasas_cmd *cmd; u8 ret; struct fusion_context *fusion; @@ -634,14 +634,13 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) fusion->reply_frames_desc_phys; IOCInitMessage->SystemRequestFrameBaseAddress = fusion->io_request_frames_phys; - + /* Set to 0 for none or 1 MSI-X vectors */ + IOCInitMessage->HostMSIxVectors = (instance->msix_vectors > 0 ? + instance->msix_vectors : 0); init_frame = (struct megasas_init_frame *)cmd->frame; memset(init_frame, 0, MEGAMFI_FRAME_SIZE); frame_hdr = &cmd->frame->hdr; - context = init_frame->context; - init_frame->context = context; - frame_hdr->cmd_status = 0xFF; frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; @@ -881,7 +880,7 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) struct megasas_register_set __iomem *reg_set; struct fusion_context *fusion; u32 max_cmd; - int i = 0; + int i = 0, count; fusion = instance->ctrl_context; @@ -933,7 +932,9 @@ megasas_init_adapter_fusion(struct megasas_instance *instance) (MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - sizeof(union MPI2_SGE_IO_UNION))/16; - fusion->last_reply_idx = 0; + count = instance->msix_vectors > 0 ? 
instance->msix_vectors : 1; + for (i = 0 ; i < count; i++) + fusion->last_reply_idx[i] = 0; /* * Allocate memory for descriptors @@ -1043,7 +1044,9 @@ map_cmd_status(struct megasas_cmd_fusion *cmd, u8 status, u8 ext_status) case MFI_STAT_DEVICE_NOT_FOUND: cmd->scmd->result = DID_BAD_TARGET << 16; break; - + case MFI_STAT_CONFIG_SEQ_MISMATCH: + cmd->scmd->result = DID_IMM_RETRY << 16; + break; default: printk(KERN_DEBUG "megasas: FW status %#x\n", status); cmd->scmd->result = DID_ERROR << 16; @@ -1066,14 +1069,17 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr, struct megasas_cmd_fusion *cmd) { - int i, sg_processed; - int sge_count, sge_idx; + int i, sg_processed, sge_count; struct scatterlist *os_sgl; struct fusion_context *fusion; fusion = instance->ctrl_context; - cmd->io_request->ChainOffset = 0; + if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = sgl_ptr; + sgl_ptr_end += fusion->max_sge_in_main_msg - 1; + sgl_ptr_end->Flags = 0; + } sge_count = scsi_dma_map(scp); @@ -1082,16 +1088,14 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, if (sge_count > instance->max_num_sge || !sge_count) return sge_count; - if (sge_count > fusion->max_sge_in_main_msg) { - /* One element to store the chain info */ - sge_idx = fusion->max_sge_in_main_msg - 1; - } else - sge_idx = sge_count; - scsi_for_each_sg(scp, os_sgl, sge_count, i) { sgl_ptr->Length = sg_dma_len(os_sgl); sgl_ptr->Address = sg_dma_address(os_sgl); sgl_ptr->Flags = 0; + if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + if (i == sge_count - 1) + sgl_ptr->Flags = IEEE_SGE_FLAGS_END_OF_LIST; + } sgl_ptr++; sg_processed = i + 1; @@ -1100,13 +1104,30 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, (sge_count > fusion->max_sge_in_main_msg)) { struct MPI25_IEEE_SGE_CHAIN64 *sg_chain; - cmd->io_request->ChainOffset = - fusion->chain_offset_io_request; + if (instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER) { + if ((cmd->io_request->IoFlags & + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) + cmd->io_request->ChainOffset = + fusion-> + chain_offset_io_request; + else + cmd->io_request->ChainOffset = 0; + } else + cmd->io_request->ChainOffset = + fusion->chain_offset_io_request; + sg_chain = sgl_ptr; /* Prepare chain element */ sg_chain->NextChainOffset = 0; - sg_chain->Flags = (IEEE_SGE_FLAGS_CHAIN_ELEMENT | - MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); + if (instance->pdev->device == + PCI_DEVICE_ID_LSI_INVADER) + sg_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT; + else + sg_chain->Flags = + (IEEE_SGE_FLAGS_CHAIN_ELEMENT | + MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) *(sge_count - sg_processed)); sg_chain->Address = cmd->sg_frame_phys_addr; @@ -1399,11 +1420,18 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, io_request->RaidContext.regLockFlags = 0; fp_possible = 0; } else { - if (MR_BuildRaidContext(&io_info, &io_request->RaidContext, + if (MR_BuildRaidContext(instance, &io_info, + &io_request->RaidContext, local_map_ptr)) fp_possible = io_info.fpOkForIo; } + /* Use smp_processor_id() for now until cmd->request->cpu is CPU + id by default, not CPU group id, otherwise all MSI-X queues won't + be utilized */ + cmd->request_desc->SCSIIO.MSIxIndex = instance->msix_vectors ? 
+ smp_processor_id() % instance->msix_vectors : 0; + if (fp_possible) { megasas_set_pd_lba(io_request, scp->cmd_len, &io_info, scp, local_map_ptr, start_lba_lo); @@ -1412,6 +1440,20 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, cmd->request_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + if (io_request->RaidContext.regLockFlags == + REGION_TYPE_UNUSED) + cmd->request_desc->SCSIIO.RequestFlags = + (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + io_request->RaidContext.Type = MPI2_TYPE_CUDA; + io_request->RaidContext.nseg = 0x1; + io_request->IoFlags |= + MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; + io_request->RaidContext.regLockFlags |= + (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | + MR_RL_FLAGS_SEQ_NUM_ENABLE); + } if ((fusion->load_balance_info[device_id].loadBalanceFlag) && (io_info.isRead)) { io_info.devHandle = @@ -1426,11 +1468,23 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, } else { io_request->RaidContext.timeoutValue = local_map_ptr->raidMap.fpPdIoTimeoutSec; - io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; - io_request->DevHandle = device_id; cmd->request_desc->SCSIIO.RequestFlags = (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + if (io_request->RaidContext.regLockFlags == + REGION_TYPE_UNUSED) + cmd->request_desc->SCSIIO.RequestFlags = + (MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK << + MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); + io_request->RaidContext.Type = MPI2_TYPE_CUDA; + io_request->RaidContext.regLockFlags |= + (MR_RL_FLAGS_GRANT_DESTINATION_CPU0 | + MR_RL_FLAGS_SEQ_NUM_ENABLE); + io_request->RaidContext.nseg = 0x1; + } + io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; + io_request->DevHandle = device_id; } /* Not FP */ } @@ -1513,8 +1567,10 @@ megasas_build_io_fusion(struct megasas_instance *instance, io_request->EEDPFlags = 0; io_request->Control = 0; io_request->EEDPBlockSize = 0; - io_request->IoFlags = 0; + io_request->ChainOffset = 0; io_request->RaidContext.RAIDFlags = 0; + io_request->RaidContext.Type = 0; + io_request->RaidContext.nseg = 0; memcpy(io_request->CDB.CDB32, scp->cmnd, scp->cmd_len); /* @@ -1612,7 +1668,6 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, req_desc->Words = 0; cmd->request_desc = req_desc; - cmd->request_desc->Words = 0; if (megasas_build_io_fusion(instance, scmd, cmd)) { megasas_return_cmd_fusion(instance, cmd); @@ -1647,7 +1702,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, * Completes all commands that is in reply descriptor queue */ int -complete_cmd_fusion(struct megasas_instance *instance) +complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) { union MPI2_REPLY_DESCRIPTORS_UNION *desc; struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; @@ -1667,7 +1722,9 @@ complete_cmd_fusion(struct megasas_instance *instance) return IRQ_HANDLED; desc = fusion->reply_frames_desc; - desc += fusion->last_reply_idx; + desc += ((MSIxIndex * fusion->reply_alloc_sz)/ + sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)) + + fusion->last_reply_idx[MSIxIndex]; reply_desc = (struct MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *)desc; @@ -1740,16 +1797,19 @@ complete_cmd_fusion(struct megasas_instance *instance) break; } - fusion->last_reply_idx++; - if (fusion->last_reply_idx >= fusion->reply_q_depth) - 
fusion->last_reply_idx = 0; + fusion->last_reply_idx[MSIxIndex]++; + if (fusion->last_reply_idx[MSIxIndex] >= + fusion->reply_q_depth) + fusion->last_reply_idx[MSIxIndex] = 0; desc->Words = ULLONG_MAX; num_completed++; /* Get the next reply descriptor */ - if (!fusion->last_reply_idx) - desc = fusion->reply_frames_desc; + if (!fusion->last_reply_idx[MSIxIndex]) + desc = fusion->reply_frames_desc + + ((MSIxIndex * fusion->reply_alloc_sz)/ + sizeof(union MPI2_REPLY_DESCRIPTORS_UNION)); else desc++; @@ -1769,7 +1829,7 @@ complete_cmd_fusion(struct megasas_instance *instance) return IRQ_NONE; wmb(); - writel(fusion->last_reply_idx, + writel((MSIxIndex << 24) | fusion->last_reply_idx[MSIxIndex], &instance->reg_set->reply_post_host_index); megasas_check_and_restore_queue_depth(instance); return IRQ_HANDLED; @@ -1787,6 +1847,9 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) struct megasas_instance *instance = (struct megasas_instance *)instance_addr; unsigned long flags; + u32 count, MSIxIndex; + + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; /* If we have already declared adapter dead, donot complete cmds */ spin_lock_irqsave(&instance->hba_lock, flags); @@ -1797,7 +1860,8 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) spin_unlock_irqrestore(&instance->hba_lock, flags); spin_lock_irqsave(&instance->completion_lock, flags); - complete_cmd_fusion(instance); + for (MSIxIndex = 0 ; MSIxIndex < count; MSIxIndex++) + complete_cmd_fusion(instance, MSIxIndex); spin_unlock_irqrestore(&instance->completion_lock, flags); } @@ -1806,20 +1870,24 @@ megasas_complete_cmd_dpc_fusion(unsigned long instance_addr) */ irqreturn_t megasas_isr_fusion(int irq, void *devp) { - struct megasas_instance *instance = (struct megasas_instance *)devp; + struct megasas_irq_context *irq_context = devp; + struct megasas_instance *instance = irq_context->instance; u32 mfiStatus, fw_state; - if (!instance->msi_flag) { + if (!instance->msix_vectors) { mfiStatus = instance->instancet->clear_intr(instance->reg_set); if (!mfiStatus) return IRQ_NONE; } /* If we are resetting, bail */ - if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) + if (test_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags)) { + instance->instancet->clear_intr(instance->reg_set); return IRQ_HANDLED; + } - if (!complete_cmd_fusion(instance)) { + if (!complete_cmd_fusion(instance, irq_context->MSIxIndex)) { + instance->instancet->clear_intr(instance->reg_set); /* If we didn't complete any commands, check for FW fault */ fw_state = instance->instancet->read_fw_status_reg( instance->reg_set) & MFI_STATE_MASK; @@ -1866,6 +1934,14 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, fusion = instance->ctrl_context; io_req = cmd->io_request; + + if (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) { + struct MPI25_IEEE_SGE_CHAIN64 *sgl_ptr_end = + (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL; + sgl_ptr_end += fusion->max_sge_in_main_msg - 1; + sgl_ptr_end->Flags = 0; + } + mpi25_ieee_chain = (struct MPI25_IEEE_SGE_CHAIN64 *)&io_req->SGL.IeeeChain; @@ -1928,15 +2004,12 @@ megasas_issue_dcmd_fusion(struct megasas_instance *instance, struct megasas_cmd *cmd) { union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc; - union desc_value d_val; req_desc = build_mpt_cmd(instance, cmd); if (!req_desc) { printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n"); return; } - d_val.word = req_desc->Words; - instance->instancet->fire_cmd(instance, req_desc->u.low, req_desc->u.high, instance->reg_set); } @@ 
-2029,14 +2102,16 @@ out: void megasas_reset_reply_desc(struct megasas_instance *instance) { - int i; + int i, count; struct fusion_context *fusion; union MPI2_REPLY_DESCRIPTORS_UNION *reply_desc; fusion = instance->ctrl_context; - fusion->last_reply_idx = 0; + count = instance->msix_vectors > 0 ? instance->msix_vectors : 1; + for (i = 0 ; i < count ; i++) + fusion->last_reply_idx[i] = 0; reply_desc = fusion->reply_frames_desc; - for (i = 0 ; i < fusion->reply_q_depth; i++, reply_desc++) + for (i = 0 ; i < fusion->reply_q_depth * count; i++, reply_desc++) reply_desc->Words = ULLONG_MAX; } @@ -2057,8 +2132,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost) if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) { printk(KERN_WARNING "megaraid_sas: Hardware critical error, " "returning FAILED.\n"); - retval = FAILED; - goto out; + return FAILED; } mutex_lock(&instance->reset_mutex); @@ -2173,7 +2247,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost) } /* Wait for FW to become ready */ - if (megasas_transition_to_ready(instance)) { + if (megasas_transition_to_ready(instance, 1)) { printk(KERN_WARNING "megaraid_sas: Failed to " "transition controller to ready.\n"); continue; @@ -2186,6 +2260,8 @@ int megasas_reset_fusion(struct Scsi_Host *shost) continue; } + clear_bit(MEGASAS_FUSION_IN_RESET, + &instance->reset_flags); instance->instancet->enable_intr(instance->reg_set); instance->adprecovery = MEGASAS_HBA_OPERATIONAL; @@ -2247,6 +2323,7 @@ int megasas_reset_fusion(struct Scsi_Host *shost) megaraid_sas_kill_hba(instance); retval = FAILED; } else { + clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance->reg_set); instance->adprecovery = MEGASAS_HBA_OPERATIONAL; } diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 82b577a72c8b..088c9f91da95 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h @@ -43,6 +43,15 @@ #define HOST_DIAG_WRITE_ENABLE 0x80 #define HOST_DIAG_RESET_ADAPTER 0x4 #define MEGASAS_FUSION_MAX_RESET_TRIES 3 +#define MAX_MSIX_QUEUES_FUSION 16 + +/* Invader defines */ +#define MPI2_TYPE_CUDA 0x2 +#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH 0x4000 +#define MR_RL_FLAGS_GRANT_DESTINATION_CPU0 0x00 +#define MR_RL_FLAGS_GRANT_DESTINATION_CPU1 0x10 +#define MR_RL_FLAGS_GRANT_DESTINATION_CUDA 0x80 +#define MR_RL_FLAGS_SEQ_NUM_ENABLE 0x8 /* T10 PI defines */ #define MR_PROT_INFO_TYPE_CONTROLLER 0x8 @@ -70,7 +79,7 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { */ #define MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO 0x7 #define MEGASAS_REQ_DESCRIPT_FLAGS_MFA 0x1 - +#define MEGASAS_REQ_DESCRIPT_FLAGS_NO_LOCK 0x2 #define MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT 1 #define MEGASAS_FP_CMD_LEN 16 @@ -82,7 +91,9 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { */ struct RAID_CONTEXT { - u16 resvd0; + u8 Type:4; + u8 nseg:4; + u8 resvd0; u16 timeoutValue; u8 regLockFlags; u8 resvd1; @@ -527,7 +538,7 @@ struct MR_LD_RAID { u8 ldState; u8 regTypeReqOnWrite; u8 modFactor; - u8 reserved2[1]; + u8 regTypeReqOnRead; u16 seqNum; struct { @@ -663,7 +674,7 @@ struct fusion_context { union MPI2_REPLY_DESCRIPTORS_UNION *reply_frames_desc; struct dma_pool *reply_frames_desc_pool; - u16 last_reply_idx; + u16 last_reply_idx[MAX_MSIX_QUEUES_FUSION]; u32 reply_q_depth; u32 request_alloc_sz; diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c index 6825772cfd6a..81209ca87274 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.c +++ 
b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -833,25 +833,31 @@ union reply_descriptor {
 static irqreturn_t
 _base_interrupt(int irq, void *bus_id)
 {
+	struct adapter_reply_queue *reply_q = bus_id;
 	union reply_descriptor rd;
 	u32 completed_cmds;
 	u8 request_desript_type;
 	u16 smid;
 	u8 cb_idx;
 	u32 reply;
-	u8 msix_index;
-	struct MPT2SAS_ADAPTER *ioc = bus_id;
+	u8 msix_index = reply_q->msix_index;
+	struct MPT2SAS_ADAPTER *ioc = reply_q->ioc;
 	Mpi2ReplyDescriptorsUnion_t *rpf;
 	u8 rc;
 
 	if (ioc->mask_interrupts)
 		return IRQ_NONE;
 
-	rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
+	if (!atomic_add_unless(&reply_q->busy, 1, 1))
+		return IRQ_NONE;
+
+	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
 	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
-	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+		atomic_dec(&reply_q->busy);
 		return IRQ_NONE;
+	}
 
 	completed_cmds = 0;
 	cb_idx = 0xFF;
@@ -860,9 +866,7 @@ _base_interrupt(int irq, void *bus_id)
 		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
 			goto out;
 		reply = 0;
-		cb_idx = 0xFF;
 		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
-		msix_index = rpf->Default.MSIxIndex;
 		if (request_desript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
 			reply = le32_to_cpu
@@ -906,32 +910,86 @@ _base_interrupt(int irq, void *bus_id)
  next:
 
 		rpf->Words = cpu_to_le64(ULLONG_MAX);
-		ioc->reply_post_host_index = (ioc->reply_post_host_index ==
+		reply_q->reply_post_host_index =
+		    (reply_q->reply_post_host_index ==
 		    (ioc->reply_post_queue_depth - 1)) ? 0 :
-		    ioc->reply_post_host_index + 1;
+		    reply_q->reply_post_host_index + 1;
 		request_desript_type =
-		    ioc->reply_post_free[ioc->reply_post_host_index].Default.
-		    ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+		    reply_q->reply_post_free[reply_q->reply_post_host_index].
+		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 		completed_cmds++;
 		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 			goto out;
-		if (!ioc->reply_post_host_index)
-			rpf = ioc->reply_post_free;
+		if (!reply_q->reply_post_host_index)
+			rpf = reply_q->reply_post_free;
 		else
 			rpf++;
 	} while (1);
 
  out:
 
-	if (!completed_cmds)
+	if (!completed_cmds) {
+		atomic_dec(&reply_q->busy);
 		return IRQ_NONE;
-
+	}
 	wmb();
-	writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
+	if (ioc->is_warpdrive) {
+		writel(reply_q->reply_post_host_index,
+		    ioc->reply_post_host_index[msix_index]);
+		atomic_dec(&reply_q->busy);
+		return IRQ_HANDLED;
+	}
+	writel(reply_q->reply_post_host_index | (msix_index <<
+	    MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+	atomic_dec(&reply_q->busy);
 	return IRQ_HANDLED;
 }
 
 /**
+ * _base_is_controller_msix_enabled - whether the controller supports multi-reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT2SAS_ADAPTER *ioc)
+{
+	return (ioc->facts.IOCCapabilities &
+	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt2sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR context
+ *
+ * Called when a Task Management request has completed. We want
+ * to flush the other reply queues so all the outstanding IO has been
+ * completed back to the OS before we process the TM completion.
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+{
+	struct adapter_reply_queue *reply_q;
+
+	/* If MSIX capability is turned off
+	 * then multi-queues are not enabled
+	 */
+	if (!_base_is_controller_msix_enabled(ioc))
+		return;
+
+	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+		if (ioc->shost_recovery)
+			return;
+		/* TMs are on msix_index == 0 */
+		if (reply_q->msix_index == 0)
+			continue;
+		_base_interrupt(reply_q->vector, (void *)reply_q);
+	}
+}
+
+/**
  * mpt2sas_base_release_callback_handler - clear interrupt callback handler
  * @cb_idx: callback index
  *
@@ -1081,74 +1139,171 @@ _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
 }
 
 /**
- * _base_save_msix_table - backup msix vector table
+ * _base_check_enable_msix - checks MSIX capability.
  * @ioc: per adapter object
  *
- * This address an errata where diag reset clears out the table
+ * Check to see if card is capable of MSIX, and set number
+ * of available msix vectors
  */
-static void
-_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
+static int
+_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
 {
-	int i;
+	int base;
+	u16 message_control;
 
-	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
-		return;
-	for (i = 0; i < ioc->msix_vector_count; i++)
-		ioc->msix_table_backup[i] = ioc->msix_table[i];
+	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+	if (!base) {
+		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
+		    "supported\n", ioc->name));
+		return -EINVAL;
+	}
+
+	/* get msix vector count */
+	/* NUMA_IO not supported for older controllers */
+	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
+	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
+		ioc->msix_vector_count = 1;
+	else {
+		pci_read_config_word(ioc->pdev, base + 2, &message_control);
+		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+	}
+	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
+	    "vector_count(%d)\n", ioc->name, ioc->msix_vector_count));
+
+	return 0;
 }
 
 /**
- * _base_restore_msix_table - this restores the msix vector table
+ * _base_free_irq - free irq
  * @ioc: per adapter object
  *
+ * Freeing respective reply_queue from the list.
  */
 static void
-_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
+_base_free_irq(struct MPT2SAS_ADAPTER *ioc)
 {
-	int i;
+	struct adapter_reply_queue *reply_q, *next;
 
-	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
+	if (list_empty(&ioc->reply_queue_list))
 		return;
-	for (i = 0; i < ioc->msix_vector_count; i++)
-		ioc->msix_table[i] = ioc->msix_table_backup[i];
+	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+		list_del(&reply_q->list);
+		synchronize_irq(reply_q->vector);
+		free_irq(reply_q->vector, reply_q);
+		kfree(reply_q);
+	}
 }
 
 /**
- * _base_check_enable_msix - checks MSIX capabable.
+ * _base_request_irq - request irq
  * @ioc: per adapter object
+ * @index: msix index into vector table
+ * @vector: irq vector
 *
- * Check to see if card is capable of MSIX, and set number
- * of available msix vectors
+ * Inserting respective reply_queue into the list.
 */
 static int
-_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
+_base_request_irq(struct MPT2SAS_ADAPTER *ioc, u8 index, u32 vector)
 {
-	int base;
-	u16 message_control;
-	u32 msix_table_offset;
+	struct adapter_reply_queue *reply_q;
+	int r;
 
-	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
-	if (!base) {
-		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
-		    "supported\n", ioc->name));
-		return -EINVAL;
+	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+	if (!reply_q) {
+		printk(MPT2SAS_ERR_FMT "unable to allocate memory %d!\n",
+		    ioc->name, (int)sizeof(struct adapter_reply_queue));
+		return -ENOMEM;
+	}
+	reply_q->ioc = ioc;
+	reply_q->msix_index = index;
+	reply_q->vector = vector;
+	atomic_set(&reply_q->busy, 0);
+	if (ioc->msix_enable)
+		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+		    MPT2SAS_DRIVER_NAME, ioc->id, index);
+	else
+		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+		    MPT2SAS_DRIVER_NAME, ioc->id);
+	r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+	    reply_q);
+	if (r) {
+		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
+		    reply_q->name, vector);
+		kfree(reply_q);
+		return -EBUSY;
 	}
 
-	/* get msix vector count */
-	pci_read_config_word(ioc->pdev, base + 2, &message_control);
-	ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+	INIT_LIST_HEAD(&reply_q->list);
+	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+	return 0;
+}
 
-	/* get msix table */
-	pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
-	msix_table_offset &= 0xFFFFFFF8;
-	ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
+/**
+ * _base_assign_reply_queues - assigning msix index for each cpu
+ * @ioc: per adapter object
+ *
+ * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would be nice if we could call irq_set_affinity; however, it is not
+ * an exported symbol
+ */
+static void
+_base_assign_reply_queues(struct MPT2SAS_ADAPTER *ioc)
+{
+	struct adapter_reply_queue *reply_q;
+	int cpu_id;
+	int cpu_grouping, loop, grouping, grouping_mod;
 
-	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
-	    "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
-	    ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
-	return 0;
+	if (!_base_is_controller_msix_enabled(ioc))
+		return;
+
+	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+	/* when there are more cpus than available msix vectors,
+	 * then group cpus together on the same irq
+	 */
+	if (ioc->cpu_count > ioc->msix_vector_count) {
+		grouping = ioc->cpu_count / ioc->msix_vector_count;
+		grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
+		if (grouping < 2 || (grouping == 2 && !grouping_mod))
+			cpu_grouping = 2;
+		else if (grouping < 4 || (grouping == 4 && !grouping_mod))
+			cpu_grouping = 4;
+		else if (grouping < 8 || (grouping == 8 && !grouping_mod))
+			cpu_grouping = 8;
+		else
+			cpu_grouping = 16;
+	} else
+		cpu_grouping = 0;
+
+	loop = 0;
+	reply_q = list_entry(ioc->reply_queue_list.next,
+	    struct adapter_reply_queue, list);
+	for_each_online_cpu(cpu_id) {
+		if (!cpu_grouping) {
+			ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
+			reply_q = list_entry(reply_q->list.next,
			    struct adapter_reply_queue, list);
+		} else {
+			if (loop < cpu_grouping) {
+				ioc->cpu_msix_table[cpu_id] =
+				    reply_q->msix_index;
+				loop++;
+			} else {
+				reply_q = list_entry(reply_q->list.next,
+				    struct adapter_reply_queue, list);
+				ioc->cpu_msix_table[cpu_id] =
+				    reply_q->msix_index;
+				loop = 1;
+			}
+		}
+	}
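To make the grouping arithmetic above concrete, a worked example (values purely illustrative): with 16 online CPUs and 4 MSI-X vectors, grouping = 16 / 4 = 4 and grouping_mod = 16 % 4 = 0, so cpu_grouping = 4 and the table comes out as:

	/*
	 * cpu_count = 16, msix_vector_count = 4  ->  cpu_grouping = 4
	 *
	 *   cpu_msix_table[0..3]   = 0   (vector 0)
	 *   cpu_msix_table[4..7]   = 1   (vector 1)
	 *   cpu_msix_table[8..11]  = 2   (vector 2)
	 *   cpu_msix_table[12..15] = 3   (vector 3)
	 */
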
} /** @@ -1161,8 +1316,6 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc) { if (ioc->msix_enable) { pci_disable_msix(ioc->pdev); - kfree(ioc->msix_table_backup); - ioc->msix_table_backup = NULL; ioc->msix_enable = 0; } } @@ -1175,10 +1328,13 @@ _base_disable_msix(struct MPT2SAS_ADAPTER *ioc) static int _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) { - struct msix_entry entries; + struct msix_entry *entries, *a; int r; + int i; u8 try_msix = 0; + INIT_LIST_HEAD(&ioc->reply_queue_list); + if (msix_disable == -1 || msix_disable == 0) try_msix = 1; @@ -1188,51 +1344,48 @@ _base_enable_msix(struct MPT2SAS_ADAPTER *ioc) if (_base_check_enable_msix(ioc) != 0) goto try_ioapic; - ioc->msix_table_backup = kcalloc(ioc->msix_vector_count, - sizeof(u32), GFP_KERNEL); - if (!ioc->msix_table_backup) { - dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for " - "msix_table_backup failed!!!\n", ioc->name)); + ioc->reply_queue_count = min_t(u8, ioc->cpu_count, + ioc->msix_vector_count); + + entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry), + GFP_KERNEL); + if (!entries) { + dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "kcalloc " + "failed @ at %s:%d/%s() !!!\n", ioc->name, __FILE__, + __LINE__, __func__)); goto try_ioapic; } - memset(&entries, 0, sizeof(struct msix_entry)); - r = pci_enable_msix(ioc->pdev, &entries, 1); + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) + a->entry = i; + + r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count); if (r) { dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix " "failed (r=%d) !!!\n", ioc->name, r)); + kfree(entries); goto try_ioapic; } - r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED, - ioc->name, ioc); - if (r) { - dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate " - "interrupt %d !!!\n", ioc->name, entries.vector)); - pci_disable_msix(ioc->pdev); - goto try_ioapic; + ioc->msix_enable = 1; + for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) { + r = _base_request_irq(ioc, i, a->vector); + if (r) { + _base_free_irq(ioc); + _base_disable_msix(ioc); + kfree(entries); + goto try_ioapic; + } } - ioc->pci_irq = entries.vector; - ioc->msix_enable = 1; + kfree(entries); return 0; /* failback to io_apic interrupt routing */ try_ioapic: - r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED, - ioc->name, ioc); - if (r) { - printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n", - ioc->name, ioc->pdev->irq); - r = -EBUSY; - goto out_fail; - } + r = _base_request_irq(ioc, 0, ioc->pdev->irq); - ioc->pci_irq = ioc->pdev->irq; - return 0; - - out_fail: return r; } @@ -1251,6 +1404,7 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) int i, r = 0; u64 pio_chip = 0; u64 chip_phys = 0; + struct adapter_reply_queue *reply_q; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -1313,9 +1467,11 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) if (r) goto out_fail; - printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", - ioc->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" : - "IO-APIC enabled"), ioc->pci_irq); + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) + printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n", + reply_q->name, ((ioc->msix_enable) ? 
"PCI-MSI-X enabled" : + "IO-APIC enabled"), reply_q->vector); + printk(MPT2SAS_INFO_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n", ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz); printk(MPT2SAS_INFO_FMT "ioport(0x%016llx), size(%d)\n", @@ -1330,7 +1486,6 @@ mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc) if (ioc->chip_phys) iounmap(ioc->chip); ioc->chip_phys = 0; - ioc->pci_irq = -1; pci_release_selected_regions(ioc->pdev, ioc->bars); pci_disable_pcie_error_reporting(pdev); pci_disable_device(pdev); @@ -1577,6 +1732,12 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr, } #endif +static inline u8 +_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) +{ + return ioc->cpu_msix_table[smp_processor_id()]; +} + /** * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware * @ioc: per adapter object @@ -1593,7 +1754,7 @@ mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle) descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO; - descriptor.SCSIIO.MSIxIndex = 0; /* TODO */ + descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc); descriptor.SCSIIO.SMID = cpu_to_le16(smid); descriptor.SCSIIO.DevHandle = cpu_to_le16(handle); descriptor.SCSIIO.LMID = 0; @@ -1617,7 +1778,7 @@ mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid) descriptor.HighPriority.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY; - descriptor.HighPriority.MSIxIndex = 0; /* TODO */ + descriptor.HighPriority.MSIxIndex = 0; descriptor.HighPriority.SMID = cpu_to_le16(smid); descriptor.HighPriority.LMID = 0; descriptor.HighPriority.Reserved1 = 0; @@ -1639,7 +1800,7 @@ mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid) u64 *request = (u64 *)&descriptor; descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; - descriptor.Default.MSIxIndex = 0; /* TODO */ + descriptor.Default.MSIxIndex = _base_get_msix_index(ioc); descriptor.Default.SMID = cpu_to_le16(smid); descriptor.Default.LMID = 0; descriptor.Default.DescriptorTypeDependent = 0; @@ -1664,7 +1825,7 @@ mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid, descriptor.SCSITarget.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET; - descriptor.SCSITarget.MSIxIndex = 0; /* TODO */ + descriptor.SCSITarget.MSIxIndex = _base_get_msix_index(ioc); descriptor.SCSITarget.SMID = cpu_to_le16(smid); descriptor.SCSITarget.LMID = 0; descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index); @@ -2171,7 +2332,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u16 max_sge_elements; u16 num_of_reply_frames; u16 chains_needed_per_io; - u32 sz, total_sz; + u32 sz, total_sz, reply_post_free_sz; u32 retry_sz; u16 max_request_credit; int i; @@ -2498,7 +2659,12 @@ chain_done: total_sz += sz; /* reply post queue, 16 byte align */ - sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t); + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + if (_base_is_controller_msix_enabled(ioc)) + sz = reply_post_free_sz * ioc->reply_queue_count; + else + sz = reply_post_free_sz; ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool", ioc->pdev, sz, 16, 0); if (!ioc->reply_post_free_dma_pool) { @@ -3186,6 +3352,7 @@ _base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) facts->MaxChainDepth = mpi_reply.MaxChainDepth; facts->WhoInit = mpi_reply.WhoInit; facts->NumberOfPorts = mpi_reply.NumberOfPorts; + facts->MaxMSIxVectors = 
mpi_reply.MaxMSIxVectors; facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit); facts->MaxReplyDescriptorPostQueueDepth = le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth); @@ -3243,7 +3410,8 @@ _base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION); mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); - + if (_base_is_controller_msix_enabled(ioc)) + mpi_request.HostMSIxVectors = ioc->reply_queue_count; mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4); mpi_request.ReplyDescriptorPostQueueDepth = cpu_to_le16(ioc->reply_post_queue_depth); @@ -3512,9 +3680,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u32 hcb_size; printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name); - - _base_save_msix_table(ioc); - drsprintk(ioc, printk(MPT2SAS_INFO_FMT "clear interrupts\n", ioc->name)); @@ -3610,7 +3775,6 @@ _base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) goto out; } - _base_restore_msix_table(ioc); printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name); return 0; @@ -3691,6 +3855,9 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) u16 smid; struct _tr_list *delayed_tr, *delayed_tr_next; u8 hide_flag; + struct adapter_reply_queue *reply_q; + long reply_post_free; + u32 reply_post_free_sz; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); @@ -3756,19 +3923,43 @@ _base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag) ioc->reply_sz) ioc->reply_free[i] = cpu_to_le32(reply_address); + /* initialize reply queues */ + _base_assign_reply_queues(ioc); + /* initialize Reply Post Free Queue */ - for (i = 0; i < ioc->reply_post_queue_depth; i++) - ioc->reply_post_free[i].Words = cpu_to_le64(ULLONG_MAX); + reply_post_free = (long)ioc->reply_post_free; + reply_post_free_sz = ioc->reply_post_queue_depth * + sizeof(Mpi2DefaultReplyDescriptor_t); + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + reply_q->reply_post_host_index = 0; + reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *) + reply_post_free; + for (i = 0; i < ioc->reply_post_queue_depth; i++) + reply_q->reply_post_free[i].Words = + cpu_to_le64(ULLONG_MAX); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_free_queue; + reply_post_free += reply_post_free_sz; + } + skip_init_reply_post_free_queue: r = _base_send_ioc_init(ioc, sleep_flag); if (r) return r; - /* initialize the index's */ + /* initialize reply free host index */ ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1; - ioc->reply_post_host_index = 0; writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex); - writel(0, &ioc->chip->ReplyPostHostIndex); + + /* initialize reply post host index */ + list_for_each_entry(reply_q, &ioc->reply_queue_list, list) { + writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT, + &ioc->chip->ReplyPostHostIndex); + if (!_base_is_controller_msix_enabled(ioc)) + goto skip_init_reply_post_host_index; + } + + skip_init_reply_post_host_index: _base_unmask_interrupts(ioc); r = _base_event_notification(ioc, sleep_flag); @@ -3819,14 +4010,10 @@ mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc) ioc->shost_recovery = 1; _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET); ioc->shost_recovery = 0; - if (ioc->pci_irq) { - synchronize_irq(pdev->irq); - free_irq(ioc->pci_irq, ioc); - } + _base_free_irq(ioc); _base_disable_msix(ioc); if (ioc->chip_phys) iounmap(ioc->chip); - ioc->pci_irq = -1; 
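The initialization loop in _base_make_ioc_operational() above hands each reply queue its own slice of the single reply-post allocation. Stripped of the driver's goto-based single-queue shortcut, the carving reduces to this sketch (same assumption that every queue gets reply_post_queue_depth descriptors):

	Mpi2ReplyDescriptorsUnion_t *ring = ioc->reply_post_free;
	u32 ring_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		reply_q->reply_post_host_index = 0;
		reply_q->reply_post_free = ring;	/* this queue's ring */
		ring = (Mpi2ReplyDescriptorsUnion_t *)((u8 *)ring + ring_sz);
	}
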
ioc->chip_phys = 0; pci_release_selected_regions(ioc->pdev, ioc->bars); pci_disable_pcie_error_reporting(pdev); @@ -3844,14 +4031,50 @@ int mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) { int r, i; + int cpu_id, last_cpu_id = 0; dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, __func__)); + /* setup cpu_msix_table */ + ioc->cpu_count = num_online_cpus(); + for_each_online_cpu(cpu_id) + last_cpu_id = cpu_id; + ioc->cpu_msix_table_sz = last_cpu_id + 1; + ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL); + ioc->reply_queue_count = 1; + if (!ioc->cpu_msix_table) { + dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for " + "cpu_msix_table failed!!!\n", ioc->name)); + r = -ENOMEM; + goto out_free_resources; + } + + if (ioc->is_warpdrive) { + ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz, + sizeof(resource_size_t *), GFP_KERNEL); + if (!ioc->reply_post_host_index) { + dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation " + "for cpu_msix_table failed!!!\n", ioc->name)); + r = -ENOMEM; + goto out_free_resources; + } + } + r = mpt2sas_base_map_resources(ioc); if (r) return r; + if (ioc->is_warpdrive) { + ioc->reply_post_host_index[0] = + (resource_size_t *)&ioc->chip->ReplyPostHostIndex; + + for (i = 1; i < ioc->cpu_msix_table_sz; i++) + ioc->reply_post_host_index[i] = (resource_size_t *) + ((u8 *)&ioc->chip->Doorbell + (0x4000 + ((i - 1) + * 4))); + } + pci_set_drvdata(ioc->pdev, ioc->shost); r = _base_get_ioc_facts(ioc, CAN_SLEEP); if (r) @@ -3972,6 +4195,9 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc) mpt2sas_base_free_resources(ioc); _base_release_memory_pools(ioc); pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); kfree(ioc->pd_handles); kfree(ioc->tm_cmds.reply); kfree(ioc->transport_cmds.reply); @@ -4009,6 +4235,9 @@ mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc) mpt2sas_base_free_resources(ioc); _base_release_memory_pools(ioc); pci_set_drvdata(ioc->pdev, NULL); + kfree(ioc->cpu_msix_table); + if (ioc->is_warpdrive) + kfree(ioc->reply_post_host_index); kfree(ioc->pd_handles); kfree(ioc->pfacts); kfree(ioc->ctl_cmds.reply); diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h index 8d5be2120c63..59354dba68c0 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_base.h +++ b/drivers/scsi/mpt2sas/mpt2sas_base.h @@ -69,11 +69,11 @@ #define MPT2SAS_DRIVER_NAME "mpt2sas" #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>" #define MPT2SAS_DESCRIPTION "LSI MPT Fusion SAS 2.0 Device Driver" -#define MPT2SAS_DRIVER_VERSION "09.100.00.00" +#define MPT2SAS_DRIVER_VERSION "09.100.00.01" #define MPT2SAS_MAJOR_VERSION 09 #define MPT2SAS_MINOR_VERSION 100 #define MPT2SAS_BUILD_VERSION 00 -#define MPT2SAS_RELEASE_VERSION 00 +#define MPT2SAS_RELEASE_VERSION 01 /* * Set MPT2SAS_SG_DEPTH value based on user input. 
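For WarpDrive controllers, mpt2sas_base_attach() above points each queue's host-index doorbell into a dedicated register bank rather than the shared ReplyPostHostIndex register. The layout that hunk encodes, spelled out:

	/*
	 * Per-queue reply post host index doorbells on WarpDrive:
	 *   queue 0:      &chip->ReplyPostHostIndex          (the usual register)
	 *   queue i >= 1: (u8 *)&chip->Doorbell + 0x4000 + (i - 1) * 4
	 * i.e. a bank of 32-bit registers starting 0x4000 past the Doorbell.
	 */
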
@@ -544,6 +544,28 @@ struct _tr_list {
 typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
 
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: entry in the ioc's reply_queue_list
+*/
+struct adapter_reply_queue {
+	struct MPT2SAS_ADAPTER *ioc;
+	u8 msix_index;
+	unsigned int vector;
+	u32 reply_post_host_index;
+	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+	char name[MPT_NAME_LENGTH];
+	atomic_t busy;
+	struct list_head list;
+};
+
 /* IOC Facts and Port Facts converted from little endian to cpu */
 union mpi2_version_union {
 	MPI2_VERSION_STRUCT Struct;
@@ -606,7 +628,7 @@ enum mutex_type {
  * @list: ioc_list
  * @shost: shost object
  * @id: unique adapter id
- * @pci_irq: irq number
+ * @cpu_count: number of online cpus
  * @name: generic ioc string
  * @tmp_string: tmp string used for logging
  * @pdev: pci pdev object
@@ -636,8 +658,8 @@ enum mutex_type {
  * @wait_for_port_enable_to_complete:
  * @msix_enable: flag indicating msix is enabled
  * @msix_vector_count: number msix vectors
- * @msix_table: virt address to the msix table
- * @msix_table_backup: backup msix table
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
  * @scsi_io_cb_idx: shost generated commands
  * @tm_cb_idx: task management commands
  * @scsih_cb_idx: scsih internal commands
@@ -728,7 +750,8 @@ enum mutex_type {
  * @reply_post_queue_depth: reply post queue depth
  * @reply_post_free: pool for reply post (64bit descriptor)
  * @reply_post_free_dma:
- * @reply_post_free_dma_pool:
+ * @reply_queue_count: number of reply queues
+ * @reply_queue_list: linked list containing the reply queue info
  * @reply_post_host_index: head index in the pool where FW completes IO
  * @delayed_tr_list: target reset link list
  * @delayed_tr_volume_list: volume target reset link list
@@ -737,7 +760,7 @@ struct MPT2SAS_ADAPTER {
 	struct list_head list;
 	struct Scsi_Host *shost;
 	u8 id;
-	u32 pci_irq;
+	int cpu_count;
 	char name[MPT_NAME_LENGTH];
 	char tmp_string[MPT_STRING_LENGTH];
 	struct pci_dev *pdev;
@@ -779,8 +802,9 @@ struct MPT2SAS_ADAPTER {
 
 	u8 msix_enable;
 	u16 msix_vector_count;
-	u32 *msix_table;
-	u32 *msix_table_backup;
+	u8 *cpu_msix_table;
+	resource_size_t **reply_post_host_index;
+	u16 cpu_msix_table_sz;
 	u32 ioc_reset_count;
 
 	/* internal commands, callback index */
@@ -911,7 +935,8 @@ struct MPT2SAS_ADAPTER {
 	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
 	dma_addr_t reply_post_free_dma;
 	struct dma_pool *reply_post_free_dma_pool;
-	u32 reply_post_host_index;
+	u8 reply_queue_count;
+	struct list_head reply_queue_list;
 
 	struct list_head delayed_tr_list;
 	struct list_head delayed_tr_volume_list;
@@ -955,6 +980,7 @@ void *mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid);
 void mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr);
 __le32 mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid);
+void mpt2sas_base_flush_reply_queues(struct MPT2SAS_ADAPTER *ioc);
 
 /* hi-priority queue */
 u16 mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 246d5fbc6e5a..9adb0133d6fb 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++
b/drivers/scsi/mpt2sas/mpt2sas_ctl.c @@ -2704,6 +2704,33 @@ _ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr, static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL); +/** + * _ctl_ioc_reply_queue_count_show - number of reply queues + * @cdev - pointer to embedded class device + * @buf - the buffer returned + * + * This is number of reply queues + * + * A sysfs 'read-only' shost attribute. + */ +static ssize_t +_ctl_ioc_reply_queue_count_show(struct device *cdev, + struct device_attribute *attr, char *buf) +{ + u8 reply_queue_count; + struct Scsi_Host *shost = class_to_shost(cdev); + struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); + + if ((ioc->facts.IOCCapabilities & + MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable) + reply_queue_count = ioc->reply_queue_count; + else + reply_queue_count = 1; + return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count); +} +static DEVICE_ATTR(reply_queue_count, S_IRUGO, + _ctl_ioc_reply_queue_count_show, NULL); + struct DIAG_BUFFER_START { __le32 Size; __le32 DiagVersion; @@ -2914,6 +2941,7 @@ struct device_attribute *mpt2sas_host_attrs[] = { &dev_attr_host_trace_buffer_size, &dev_attr_host_trace_buffer, &dev_attr_host_trace_buffer_enable, + &dev_attr_reply_queue_count, NULL, }; diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index 5202de3f3d3f..1da1aa1a11e2 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -2161,6 +2161,7 @@ _scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) return 1; if (ioc->tm_cmds.smid != smid) return 1; + mpt2sas_base_flush_reply_queues(ioc); ioc->tm_cmds.status |= MPT2_CMD_COMPLETE; mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply); if (mpi_reply) { @@ -7353,6 +7354,7 @@ _scsih_remove(struct pci_dev *pdev) } sas_remove_host(shost); + mpt2sas_base_detach(ioc); list_del(&ioc->list); scsi_remove_host(shost); scsi_host_put(shost); diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c index 15c798026217..230732241aa2 100644 --- a/drivers/scsi/mpt2sas/mpt2sas_transport.c +++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c @@ -163,7 +163,7 @@ _transport_set_identify(struct MPT2SAS_ADAPTER *ioc, u16 handle, return -EIO; } - memset(identify, 0, sizeof(identify)); + memset(identify, 0, sizeof(*identify)); device_info = le32_to_cpu(sas_device_pg0.DeviceInfo); /* sas_address */ diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c index 3501291618fd..7e423e5ad5e1 100644 --- a/drivers/scsi/mvsas/mv_94xx.c +++ b/drivers/scsi/mvsas/mv_94xx.c @@ -398,6 +398,16 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) /* init phys */ mvs_phy_hacks(mvi); + /* disable non data frame retry */ + tmp = mvs_cr32(mvi, CMD_SAS_CTL1); + if ((revision == VANIR_A0_REV) || + (revision == VANIR_B0_REV) || + (revision == VANIR_C0_REV)) { + tmp &= ~0xffff; + tmp |= 0x007f; + mvs_cw32(mvi, CMD_SAS_CTL1, tmp); + } + /* set LED blink when IO*/ mw32(MVS_PA_VSR_ADDR, VSR_PHY_ACT_LED); tmp = mr32(MVS_PA_VSR_PORT); @@ -500,6 +510,27 @@ static int __devinit mvs_94xx_init(struct mvs_info *mvi) tmp |= CINT_PHY_MASK; mw32(MVS_INT_MASK, tmp); + tmp = mvs_cr32(mvi, CMD_LINK_TIMER); + tmp |= 0xFFFF0000; + mvs_cw32(mvi, CMD_LINK_TIMER, tmp); + + /* tune STP performance */ + tmp = 0x003F003F; + mvs_cw32(mvi, CMD_PL_TIMER, tmp); + + /* This can improve expander large block size seq write performance */ + tmp = mvs_cr32(mvi, 
CMD_PORT_LAYER_TIMER1); + tmp |= 0xFFFF007F; + mvs_cw32(mvi, CMD_PORT_LAYER_TIMER1, tmp); + + /* change the connection open-close behavior (bit 9) + * set bit8 to 1 for performance tuning */ + tmp = mvs_cr32(mvi, CMD_SL_MODE0); + tmp |= 0x00000300; + /* set bit0 to 0 to enable retry for no_dest reject case */ + tmp &= 0xFFFFFFFE; + mvs_cw32(mvi, CMD_SL_MODE0, tmp); + /* Enable SRS interrupt */ mw32(MVS_INT_MASK_SRS_0, 0xFFFF); @@ -823,6 +854,10 @@ static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i, phy->att_dev_info = PORT_DEV_STP_TRGT | 1; } + /* enable spin up bit */ + mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + mvs_write_port_cfg_data(mvi, i, 0x04); + } void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id, diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h index dec7cadb7485..f5451940d289 100644 --- a/drivers/scsi/mvsas/mv_defs.h +++ b/drivers/scsi/mvsas/mv_defs.h @@ -387,6 +387,8 @@ enum sas_cmd_port_registers { CMD_SL_MODE0 = 0x1BC, /* SL Mode 0 */ CMD_SL_MODE1 = 0x1C0, /* SL Mode 1 */ CMD_PND_FIFO_CTL1 = 0x1C4, /* Pending FIFO Control 1 */ + CMD_PORT_LAYER_TIMER1 = 0x1E0, /* Port Layer Timer 1 */ + CMD_LINK_TIMER = 0x1E4, /* Link Timer */ }; enum mvs_info_flags { diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 4e9af66fd1d3..621b5e072758 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -59,7 +59,7 @@ static struct scsi_host_template mvs_sht = { .name = DRV_NAME, .queuecommand = sas_queuecommand, .target_alloc = sas_target_alloc, - .slave_configure = mvs_slave_configure, + .slave_configure = sas_slave_configure, .slave_destroy = sas_slave_destroy, .scan_finished = mvs_scan_finished, .scan_start = mvs_scan_start, @@ -74,7 +74,7 @@ static struct scsi_host_template mvs_sht = { .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_bus_reset_handler = sas_eh_bus_reset_handler, - .slave_alloc = mvs_slave_alloc, + .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = mvst_host_attrs, @@ -707,6 +707,15 @@ static struct pci_device_id __devinitdata mvs_pci_table[] = { { PCI_VDEVICE(TTI, 0x2760), chip_9480 }, { .vendor = 0x1b4b, + .device = 0x9480, + .subvendor = PCI_ANY_ID, + .subdevice = 0x9480, + .class = 0, + .class_mask = 0, + .driver_data = chip_9480, + }, + { + .vendor = 0x1b4b, .device = 0x9445, .subvendor = PCI_ANY_ID, .subdevice = 0x9480, diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index 4958fefff365..a4884a57cf79 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -214,7 +214,7 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, break; case PHY_FUNC_RELEASE_SPINUP_HOLD: default: - rc = -EOPNOTSUPP; + rc = -ENOSYS; } msleep(200); return rc; @@ -265,6 +265,12 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) id->dev_type = phy->identify.device_type; id->initiator_bits = SAS_PROTOCOL_ALL; id->target_bits = phy->identify.target_port_protocols; + + /* direct attached SAS device */ + if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x00); + } } else if (phy->phy_type & PORT_TYPE_SATA) { /*Nothing*/ } @@ -276,36 +282,6 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i) PORTE_BYTES_DMAED); } -int mvs_slave_alloc(struct scsi_device *scsi_dev) -{ - struct domain_device *dev = 
sdev_to_domain_dev(scsi_dev); - if (dev_is_sata(dev)) { - /* We don't need to rescan targets - * if REPORT_LUNS request is failed - */ - if (scsi_dev->lun > 0) - return -ENXIO; - scsi_dev->tagged_supported = 1; - } - - return sas_slave_alloc(scsi_dev); -} - -int mvs_slave_configure(struct scsi_device *sdev) -{ - struct domain_device *dev = sdev_to_domain_dev(sdev); - int ret = sas_slave_configure(sdev); - - if (ret) - return ret; - if (!dev_is_sata(dev)) { - sas_change_queue_depth(sdev, - MVS_QUEUE_SIZE, - SCSI_QDEPTH_DEFAULT); - } - return 0; -} - void mvs_scan_start(struct Scsi_Host *shost) { int i, j; @@ -426,7 +402,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi, /* generate open address frame hdr (first 12 bytes) */ /* initiator, SMP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01; - buf_oaf[1] = dev->linkrate & 0xf; + buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = 0xFFFF; /* SAS SPEC */ memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); @@ -571,7 +547,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi, /* generate open address frame hdr (first 12 bytes) */ /* initiator, STP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1; - buf_oaf[1] = dev->linkrate & 0xf; + buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); @@ -679,7 +655,7 @@ static int mvs_task_prep_ssp(struct mvs_info *mvi, /* generate open address frame hdr (first 12 bytes) */ /* initiator, SSP, ftype 1h */ buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1; - buf_oaf[1] = dev->linkrate & 0xf; + buf_oaf[1] = min(sas_port->linkrate, dev->linkrate) & 0xf; *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1); memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE); @@ -1241,6 +1217,12 @@ static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock) port->wide_port_phymap = sas_port->phy_mask; mv_printk("set wide port phy map %x\n", sas_port->phy_mask); mvs_update_wideport(mvi, sas_phy->id); + + /* direct attached SAS device */ + if (phy->att_dev_info & PORT_SSP_TRGT_MASK) { + MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_PHY_STAT); + MVS_CHIP_DISP->write_port_cfg_data(mvi, i, 0x04); + } } if (lock) spin_unlock_irqrestore(&mvi->lock, flags); @@ -1387,28 +1369,6 @@ void mvs_dev_gone(struct domain_device *dev) mvs_dev_gone_notify(dev); } -static struct sas_task *mvs_alloc_task(void) -{ - struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL); - - if (task) { - INIT_LIST_HEAD(&task->list); - spin_lock_init(&task->task_state_lock); - task->task_state_flags = SAS_TASK_STATE_PENDING; - init_timer(&task->timer); - init_completion(&task->completion); - } - return task; -} - -static void mvs_free_task(struct sas_task *task) -{ - if (task) { - BUG_ON(!list_empty(&task->list)); - kfree(task); - } -} - static void mvs_task_done(struct sas_task *task) { if (!del_timer(&task->timer)) @@ -1432,7 +1392,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, struct sas_task *task = NULL; for (retry = 0; retry < 3; retry++) { - task = mvs_alloc_task(); + task = sas_alloc_task(GFP_KERNEL); if (!task) return -ENOMEM; @@ -1490,15 +1450,14 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat); - mvs_free_task(task); + sas_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); - if (task != NULL) - 
mvs_free_task(task); + sas_free_task(task); return res; } diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index 44b474513223..c04a4f5b5972 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -46,7 +46,7 @@ #include "mv_defs.h" #define DRV_NAME "mvsas" -#define DRV_VERSION "0.8.2" +#define DRV_VERSION "0.8.16" #define MVS_ID_NOT_MAPPED 0x7f #define WIDE_PORT_MAX_PHY 4 #define mv_printk(fmt, arg ...) \ @@ -458,8 +458,6 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata); void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo, u32 off_hi, u64 sas_addr); -int mvs_slave_alloc(struct scsi_device *scsi_dev); -int mvs_slave_configure(struct scsi_device *sdev); void mvs_scan_start(struct Scsi_Host *shost); int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time); int mvs_queue_command(struct sas_task *task, const int num, diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c new file mode 100644 index 000000000000..88cf1db21a79 --- /dev/null +++ b/drivers/scsi/mvumi.c @@ -0,0 +1,2018 @@ +/* + * Marvell UMI driver + * + * Copyright 2011 Marvell. <jyli@marvell.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA +*/ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/device.h> +#include <linux/pci.h> +#include <linux/list.h> +#include <linux/spinlock.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/blkdev.h> +#include <linux/io.h> +#include <scsi/scsi.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_host.h> +#include <scsi/scsi_transport.h> +#include <scsi/scsi_eh.h> +#include <linux/uaccess.h> + +#include "mvumi.h" + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("jyli@marvell.com"); +MODULE_DESCRIPTION("Marvell UMI Driver"); + +static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = { + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_2, PCI_DEVICE_ID_MARVELL_MV9143) }, + { 0 } +}; + +MODULE_DEVICE_TABLE(pci, mvumi_pci_table); + +static void tag_init(struct mvumi_tag *st, unsigned short size) +{ + unsigned short i; + BUG_ON(size != st->size); + st->top = size; + for (i = 0; i < size; i++) + st->stack[i] = size - 1 - i; +} + +static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st) +{ + BUG_ON(st->top <= 0); + return st->stack[--st->top]; +} + +static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st, + unsigned short tag) +{ + BUG_ON(st->top >= st->size); + st->stack[st->top++] = tag; +} + +static bool tag_is_empty(struct mvumi_tag *st) +{ + if (st->top == 0) + return 1; + else + return 0; +} + +static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array) +{ + int i; + + for (i = 0; i < MAX_BASE_ADDRESS; i++) + if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) && + addr_array[i]) + 
pci_iounmap(dev, addr_array[i]); +} + +static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array) +{ + int i; + + for (i = 0; i < MAX_BASE_ADDRESS; i++) { + if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { + addr_array[i] = pci_iomap(dev, i, 0); + if (!addr_array[i]) { + dev_err(&dev->dev, "failed to map Bar[%d]\n", + i); + mvumi_unmap_pci_addr(dev, addr_array); + return -ENOMEM; + } + } else + addr_array[i] = NULL; + + dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]); + } + + return 0; +} + +static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba, + enum resource_type type, unsigned int size) +{ + struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL); + + if (!res) { + dev_err(&mhba->pdev->dev, + "Failed to allocate memory for resouce manager.\n"); + return NULL; + } + + switch (type) { + case RESOURCE_CACHED_MEMORY: + res->virt_addr = kzalloc(size, GFP_KERNEL); + if (!res->virt_addr) { + dev_err(&mhba->pdev->dev, + "unable to allocate memory,size = %d.\n", size); + kfree(res); + return NULL; + } + break; + + case RESOURCE_UNCACHED_MEMORY: + size = round_up(size, 8); + res->virt_addr = pci_alloc_consistent(mhba->pdev, size, + &res->bus_addr); + if (!res->virt_addr) { + dev_err(&mhba->pdev->dev, + "unable to allocate consistent mem," + "size = %d.\n", size); + kfree(res); + return NULL; + } + memset(res->virt_addr, 0, size); + break; + + default: + dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type); + kfree(res); + return NULL; + } + + res->type = type; + res->size = size; + INIT_LIST_HEAD(&res->entry); + list_add_tail(&res->entry, &mhba->res_list); + + return res; +} + +static void mvumi_release_mem_resource(struct mvumi_hba *mhba) +{ + struct mvumi_res *res, *tmp; + + list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) { + switch (res->type) { + case RESOURCE_UNCACHED_MEMORY: + pci_free_consistent(mhba->pdev, res->size, + res->virt_addr, res->bus_addr); + break; + case RESOURCE_CACHED_MEMORY: + kfree(res->virt_addr); + break; + default: + dev_err(&mhba->pdev->dev, + "unknown resource type %d\n", res->type); + break; + } + list_del(&res->entry); + kfree(res); + } + mhba->fw_flag &= ~MVUMI_FW_ALLOC; +} + +/** + * mvumi_make_sgl - Prepares SGL + * @mhba: Adapter soft state + * @scmd: SCSI command from the mid-layer + * @sgl_p: SGL to be filled in + * @sg_count return the number of SG elements + * + * If successful, this function returns 0. otherwise, it returns -1. + */ +static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd, + void *sgl_p, unsigned char *sg_count) +{ + struct scatterlist *sg; + struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p; + unsigned int i; + unsigned int sgnum = scsi_sg_count(scmd); + dma_addr_t busaddr; + + if (sgnum) { + sg = scsi_sglist(scmd); + *sg_count = pci_map_sg(mhba->pdev, sg, sgnum, + (int) scmd->sc_data_direction); + if (*sg_count > mhba->max_sge) { + dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger " + "than max sg[0x%x].\n", + *sg_count, mhba->max_sge); + return -1; + } + for (i = 0; i < *sg_count; i++) { + busaddr = sg_dma_address(&sg[i]); + m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); + m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); + m_sg->flags = 0; + m_sg->size = cpu_to_le32(sg_dma_len(&sg[i])); + if ((i + 1) == *sg_count) + m_sg->flags |= SGD_EOT; + + m_sg++; + } + } else { + scmd->SCp.dma_handle = scsi_bufflen(scmd) ? 
+ pci_map_single(mhba->pdev, scsi_sglist(scmd), + scsi_bufflen(scmd), + (int) scmd->sc_data_direction) + : 0; + busaddr = scmd->SCp.dma_handle; + m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr)); + m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr)); + m_sg->flags = SGD_EOT; + m_sg->size = cpu_to_le32(scsi_bufflen(scmd)); + *sg_count = 1; + } + + return 0; +} + +static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, + unsigned int size) +{ + struct mvumi_sgl *m_sg; + void *virt_addr; + dma_addr_t phy_addr; + + if (size == 0) + return 0; + + virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr); + if (!virt_addr) + return -1; + + memset(virt_addr, 0, size); + + m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; + cmd->frame->sg_counts = 1; + cmd->data_buf = virt_addr; + + m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr)); + m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr)); + m_sg->flags = SGD_EOT; + m_sg->size = cpu_to_le32(size); + + return 0; +} + +static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba, + unsigned int buf_size) +{ + struct mvumi_cmd *cmd; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) { + dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n"); + return NULL; + } + INIT_LIST_HEAD(&cmd->queue_pointer); + + cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); + if (!cmd->frame) { + dev_err(&mhba->pdev->dev, "failed to allocate memory for FW" + " frame,size = %d.\n", mhba->ib_max_size); + kfree(cmd); + return NULL; + } + + if (buf_size) { + if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) { + dev_err(&mhba->pdev->dev, "failed to allocate memory" + " for internal frame\n"); + kfree(cmd->frame); + kfree(cmd); + return NULL; + } + } else + cmd->frame->sg_counts = 0; + + return cmd; +} + +static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd) +{ + struct mvumi_sgl *m_sg; + unsigned int size; + dma_addr_t phy_addr; + + if (cmd && cmd->frame) { + if (cmd->frame->sg_counts) { + m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0]; + size = m_sg->size; + + phy_addr = (dma_addr_t) m_sg->baseaddr_l | + (dma_addr_t) ((m_sg->baseaddr_h << 16) << 16); + + pci_free_consistent(mhba->pdev, size, cmd->data_buf, + phy_addr); + } + kfree(cmd->frame); + kfree(cmd); + } +} + +/** + * mvumi_get_cmd - Get a command from the free pool + * @mhba: Adapter soft state + * + * Returns a free command from the pool + */ +static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba) +{ + struct mvumi_cmd *cmd = NULL; + + if (likely(!list_empty(&mhba->cmd_pool))) { + cmd = list_entry((&mhba->cmd_pool)->next, + struct mvumi_cmd, queue_pointer); + list_del_init(&cmd->queue_pointer); + } else + dev_warn(&mhba->pdev->dev, "command pool is empty!\n"); + + return cmd; +} + +/** + * mvumi_return_cmd - Return a cmd to free command pool + * @mhba: Adapter soft state + * @cmd: Command packet to be returned to free command pool + */ +static inline void mvumi_return_cmd(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd) +{ + cmd->scmd = NULL; + list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); +} + +/** + * mvumi_free_cmds - Free all the cmds in the free cmd pool + * @mhba: Adapter soft state + */ +static void mvumi_free_cmds(struct mvumi_hba *mhba) +{ + struct mvumi_cmd *cmd; + + while (!list_empty(&mhba->cmd_pool)) { + cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, + queue_pointer); + list_del(&cmd->queue_pointer); + kfree(cmd->frame); + kfree(cmd); + } +} + +/** + * 
mvumi_alloc_cmds - Allocates the command packets + * @mhba: Adapter soft state + * + */ +static int mvumi_alloc_cmds(struct mvumi_hba *mhba) +{ + int i; + struct mvumi_cmd *cmd; + + for (i = 0; i < mhba->max_io; i++) { + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + goto err_exit; + + INIT_LIST_HEAD(&cmd->queue_pointer); + list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool); + cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL); + if (!cmd->frame) + goto err_exit; + } + return 0; + +err_exit: + dev_err(&mhba->pdev->dev, + "failed to allocate memory for cmd[0x%x].\n", i); + while (!list_empty(&mhba->cmd_pool)) { + cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd, + queue_pointer); + list_del(&cmd->queue_pointer); + kfree(cmd->frame); + kfree(cmd); + } + return -ENOMEM; +} + +static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry) +{ + unsigned int ib_rp_reg, cur_ib_entry; + + if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) { + dev_warn(&mhba->pdev->dev, "firmware io overflow.\n"); + return -1; + } + ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER); + + if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) == + (mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) && + ((ib_rp_reg & CL_POINTER_TOGGLE) != + (mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) { + dev_warn(&mhba->pdev->dev, "no free slot to use.\n"); + return -1; + } + + cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK; + cur_ib_entry++; + if (cur_ib_entry >= mhba->list_num_io) { + cur_ib_entry -= mhba->list_num_io; + mhba->ib_cur_slot ^= CL_POINTER_TOGGLE; + } + mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK; + mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK); + *ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size; + atomic_inc(&mhba->fw_outstanding); + + return 0; +} + +static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba) +{ + iowrite32(0xfff, mhba->ib_shadow); + iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER); +} + +static char mvumi_check_ob_frame(struct mvumi_hba *mhba, + unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame) +{ + unsigned short tag, request_id; + + udelay(1); + p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; + request_id = p_outb_frame->request_id; + tag = p_outb_frame->tag; + if (tag > mhba->tag_pool.size) { + dev_err(&mhba->pdev->dev, "ob frame data error\n"); + return -1; + } + if (mhba->tag_cmd[tag] == NULL) { + dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag); + return -1; + } else if (mhba->tag_cmd[tag]->request_id != request_id && + mhba->request_id_enabled) { + dev_err(&mhba->pdev->dev, "request ID from FW:0x%x," + "cmd request ID:0x%x\n", request_id, + mhba->tag_cmd[tag]->request_id); + return -1; + } + + return 0; +} + +static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba) +{ + unsigned int ob_write_reg, ob_write_shadow_reg; + unsigned int cur_obf, assign_obf_end, i; + struct mvumi_ob_data *ob_data; + struct mvumi_rsp_frame *p_outb_frame; + + do { + ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER); + ob_write_shadow_reg = ioread32(mhba->ob_shadow); + } while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg); + + cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK; + assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK; + + if ((ob_write_reg & CL_POINTER_TOGGLE) != + (mhba->ob_cur_slot & CL_POINTER_TOGGLE)) { + assign_obf_end += mhba->list_num_io; + } + + for (i = (assign_obf_end - cur_obf); i != 0; i--) { + cur_obf++; + if (cur_obf >= mhba->list_num_io) { + cur_obf -= mhba->list_num_io; + 
mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; + } + + p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size; + + /* Copy pointer may point to entry in outbound list + * before entry has valid data + */ + if (unlikely(p_outb_frame->tag > mhba->tag_pool.size || + mhba->tag_cmd[p_outb_frame->tag] == NULL || + p_outb_frame->request_id != + mhba->tag_cmd[p_outb_frame->tag]->request_id)) + if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame)) + continue; + + if (!list_empty(&mhba->ob_data_list)) { + ob_data = (struct mvumi_ob_data *) + list_first_entry(&mhba->ob_data_list, + struct mvumi_ob_data, list); + list_del_init(&ob_data->list); + } else { + ob_data = NULL; + if (cur_obf == 0) { + cur_obf = mhba->list_num_io - 1; + mhba->ob_cur_slot ^= CL_POINTER_TOGGLE; + } else + cur_obf -= 1; + break; + } + + memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size); + p_outb_frame->tag = 0xff; + + list_add_tail(&ob_data->list, &mhba->free_ob_list); + } + mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK; + mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK); + iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER); +} + +static void mvumi_reset(void *regs) +{ + iowrite32(0, regs + CPU_ENPOINTA_MASK_REG); + if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE) + return; + + iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG); +} + +static unsigned char mvumi_start(struct mvumi_hba *mhba); + +static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba) +{ + mhba->fw_state = FW_STATE_ABORT; + mvumi_reset(mhba->mmio); + + if (mvumi_start(mhba)) + return FAILED; + else + return SUCCESS; +} + +static int mvumi_host_reset(struct scsi_cmnd *scmd) +{ + struct mvumi_hba *mhba; + + mhba = (struct mvumi_hba *) scmd->device->host->hostdata; + + scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n", + scmd->serial_number, scmd->cmnd[0], scmd->retries); + + return mvumi_wait_for_outstanding(mhba); +} + +static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd) +{ + unsigned long flags; + + cmd->cmd_status = REQ_STATUS_PENDING; + + if (atomic_read(&cmd->sync_cmd)) { + dev_err(&mhba->pdev->dev, + "last blocked cmd not finished, sync_cmd = %d\n", + atomic_read(&cmd->sync_cmd)); + BUG_ON(1); + return -1; + } + atomic_inc(&cmd->sync_cmd); + spin_lock_irqsave(mhba->shost->host_lock, flags); + mhba->instancet->fire_cmd(mhba, cmd); + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + + wait_event_timeout(mhba->int_cmd_wait_q, + (cmd->cmd_status != REQ_STATUS_PENDING), + MVUMI_INTERNAL_CMD_WAIT_TIME * HZ); + + /* command timeout */ + if (atomic_read(&cmd->sync_cmd)) { + spin_lock_irqsave(mhba->shost->host_lock, flags); + atomic_dec(&cmd->sync_cmd); + if (mhba->tag_cmd[cmd->frame->tag]) { + mhba->tag_cmd[cmd->frame->tag] = 0; + dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n", + cmd->frame->tag); + tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); + } + if (!list_empty(&cmd->queue_pointer)) { + dev_warn(&mhba->pdev->dev, + "TIMEOUT:A internal command doesn't send!\n"); + list_del_init(&cmd->queue_pointer); + } else + atomic_dec(&mhba->fw_outstanding); + + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + } + return 0; +} + +static void mvumi_release_fw(struct mvumi_hba *mhba) +{ + mvumi_free_cmds(mhba); + mvumi_release_mem_resource(mhba); + mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); + kfree(mhba->handshake_page); + pci_release_regions(mhba->pdev); +} + +static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba) +{ + struct mvumi_cmd 
*cmd; + struct mvumi_msg_frame *frame; + unsigned char device_id, retry = 0; + unsigned char bitcount = sizeof(unsigned char) * 8; + + for (device_id = 0; device_id < mhba->max_target_id; device_id++) { + if (!(mhba->target_map[device_id / bitcount] & + (1 << (device_id % bitcount)))) + continue; +get_cmd: cmd = mvumi_create_internal_cmd(mhba, 0); + if (!cmd) { + if (retry++ >= 5) { + dev_err(&mhba->pdev->dev, "failed to get memory" + " for internal flush cache cmd for " + "device %d", device_id); + retry = 0; + continue; + } else + goto get_cmd; + } + cmd->scmd = NULL; + cmd->cmd_status = REQ_STATUS_PENDING; + atomic_set(&cmd->sync_cmd, 0); + frame = cmd->frame; + frame->req_function = CL_FUN_SCSI_CMD; + frame->device_id = device_id; + frame->cmd_flag = CMD_FLAG_NON_DATA; + frame->data_transfer_length = 0; + frame->cdb_length = MAX_COMMAND_SIZE; + memset(frame->cdb, 0, MAX_COMMAND_SIZE); + frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC; + frame->cdb[2] = CDB_CORE_SHUTDOWN; + + mvumi_issue_blocked_cmd(mhba, cmd); + if (cmd->cmd_status != SAM_STAT_GOOD) { + dev_err(&mhba->pdev->dev, + "device %d flush cache failed, status=0x%x.\n", + device_id, cmd->cmd_status); + } + + mvumi_delete_internal_cmd(mhba, cmd); + } + return 0; +} + +static unsigned char +mvumi_calculate_checksum(struct mvumi_hs_header *p_header, + unsigned short len) +{ + unsigned char *ptr; + unsigned char ret = 0, i; + + ptr = (unsigned char *) p_header->frame_content; + for (i = 0; i < len; i++) { + ret ^= *ptr; + ptr++; + } + + return ret; +} + +void mvumi_hs_build_page(struct mvumi_hba *mhba, + struct mvumi_hs_header *hs_header) +{ + struct mvumi_hs_page2 *hs_page2; + struct mvumi_hs_page4 *hs_page4; + struct mvumi_hs_page3 *hs_page3; + struct timeval time; + unsigned int local_time; + + switch (hs_header->page_code) { + case HS_PAGE_HOST_INFO: + hs_page2 = (struct mvumi_hs_page2 *) hs_header; + hs_header->frame_length = sizeof(*hs_page2) - 4; + memset(hs_header->frame_content, 0, hs_header->frame_length); + hs_page2->host_type = 3; /* 3 mean linux*/ + hs_page2->host_ver.ver_major = VER_MAJOR; + hs_page2->host_ver.ver_minor = VER_MINOR; + hs_page2->host_ver.ver_oem = VER_OEM; + hs_page2->host_ver.ver_build = VER_BUILD; + hs_page2->system_io_bus = 0; + hs_page2->slot_number = 0; + hs_page2->intr_level = 0; + hs_page2->intr_vector = 0; + do_gettimeofday(&time); + local_time = (unsigned int) (time.tv_sec - + (sys_tz.tz_minuteswest * 60)); + hs_page2->seconds_since1970 = local_time; + hs_header->checksum = mvumi_calculate_checksum(hs_header, + hs_header->frame_length); + break; + + case HS_PAGE_FIRM_CTL: + hs_page3 = (struct mvumi_hs_page3 *) hs_header; + hs_header->frame_length = sizeof(*hs_page3) - 4; + memset(hs_header->frame_content, 0, hs_header->frame_length); + hs_header->checksum = mvumi_calculate_checksum(hs_header, + hs_header->frame_length); + break; + + case HS_PAGE_CL_INFO: + hs_page4 = (struct mvumi_hs_page4 *) hs_header; + hs_header->frame_length = sizeof(*hs_page4) - 4; + memset(hs_header->frame_content, 0, hs_header->frame_length); + hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys); + hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys); + + hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys); + hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys); + hs_page4->ib_entry_size = mhba->ib_max_size_setting; + hs_page4->ob_entry_size = mhba->ob_max_size_setting; + hs_page4->ob_depth = mhba->list_num_io; + hs_page4->ib_depth = mhba->list_num_io; + hs_header->checksum = 
mvumi_calculate_checksum(hs_header, + hs_header->frame_length); + break; + + default: + dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n", + hs_header->page_code); + break; + } +} + +/** + * mvumi_init_data - Initialize requested date for FW + * @mhba: Adapter soft state + */ +static int mvumi_init_data(struct mvumi_hba *mhba) +{ + struct mvumi_ob_data *ob_pool; + struct mvumi_res *res_mgnt; + unsigned int tmp_size, offset, i; + void *virmem, *v; + dma_addr_t p; + + if (mhba->fw_flag & MVUMI_FW_ALLOC) + return 0; + + tmp_size = mhba->ib_max_size * mhba->max_io; + tmp_size += 128 + mhba->ob_max_size * mhba->max_io; + tmp_size += 8 + sizeof(u32) + 16; + + res_mgnt = mvumi_alloc_mem_resource(mhba, + RESOURCE_UNCACHED_MEMORY, tmp_size); + if (!res_mgnt) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for inbound list\n"); + goto fail_alloc_dma_buf; + } + + p = res_mgnt->bus_addr; + v = res_mgnt->virt_addr; + /* ib_list */ + offset = round_up(p, 128) - p; + p += offset; + v += offset; + mhba->ib_list = v; + mhba->ib_list_phys = p; + v += mhba->ib_max_size * mhba->max_io; + p += mhba->ib_max_size * mhba->max_io; + /* ib shadow */ + offset = round_up(p, 8) - p; + p += offset; + v += offset; + mhba->ib_shadow = v; + mhba->ib_shadow_phys = p; + p += sizeof(u32); + v += sizeof(u32); + /* ob shadow */ + offset = round_up(p, 8) - p; + p += offset; + v += offset; + mhba->ob_shadow = v; + mhba->ob_shadow_phys = p; + p += 8; + v += 8; + + /* ob list */ + offset = round_up(p, 128) - p; + p += offset; + v += offset; + + mhba->ob_list = v; + mhba->ob_list_phys = p; + + /* ob data pool */ + tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool)); + tmp_size = round_up(tmp_size, 8); + + res_mgnt = mvumi_alloc_mem_resource(mhba, + RESOURCE_CACHED_MEMORY, tmp_size); + if (!res_mgnt) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for outbound data buffer\n"); + goto fail_alloc_dma_buf; + } + virmem = res_mgnt->virt_addr; + + for (i = mhba->max_io; i != 0; i--) { + ob_pool = (struct mvumi_ob_data *) virmem; + list_add_tail(&ob_pool->list, &mhba->ob_data_list); + virmem += mhba->ob_max_size + sizeof(*ob_pool); + } + + tmp_size = sizeof(unsigned short) * mhba->max_io + + sizeof(struct mvumi_cmd *) * mhba->max_io; + tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) / + (sizeof(unsigned char) * 8); + + res_mgnt = mvumi_alloc_mem_resource(mhba, + RESOURCE_CACHED_MEMORY, tmp_size); + if (!res_mgnt) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for tag and target map\n"); + goto fail_alloc_dma_buf; + } + + virmem = res_mgnt->virt_addr; + mhba->tag_pool.stack = virmem; + mhba->tag_pool.size = mhba->max_io; + tag_init(&mhba->tag_pool, mhba->max_io); + virmem += sizeof(unsigned short) * mhba->max_io; + + mhba->tag_cmd = virmem; + virmem += sizeof(struct mvumi_cmd *) * mhba->max_io; + + mhba->target_map = virmem; + + mhba->fw_flag |= MVUMI_FW_ALLOC; + return 0; + +fail_alloc_dma_buf: + mvumi_release_mem_resource(mhba); + return -1; +} + +static int mvumi_hs_process_page(struct mvumi_hba *mhba, + struct mvumi_hs_header *hs_header) +{ + struct mvumi_hs_page1 *hs_page1; + unsigned char page_checksum; + + page_checksum = mvumi_calculate_checksum(hs_header, + hs_header->frame_length); + if (page_checksum != hs_header->checksum) { + dev_err(&mhba->pdev->dev, "checksum error\n"); + return -1; + } + + switch (hs_header->page_code) { + case HS_PAGE_FIRM_CAP: + hs_page1 = (struct mvumi_hs_page1 *) hs_header; + + mhba->max_io = hs_page1->max_io_support; + 
mhba->list_num_io = hs_page1->cl_inout_list_depth; + mhba->max_transfer_size = hs_page1->max_transfer_size; + mhba->max_target_id = hs_page1->max_devices_support; + mhba->hba_capability = hs_page1->capability; + mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size; + mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2; + + mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size; + mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2; + + dev_dbg(&mhba->pdev->dev, "FW version:%d\n", + hs_page1->fw_ver.ver_build); + + break; + default: + dev_err(&mhba->pdev->dev, "handshake: page code error\n"); + return -1; + } + return 0; +} + +/** + * mvumi_handshake - Move the FW to READY state + * @mhba: Adapter soft state + * + * During the initialization, FW passes can potentially be in any one of + * several possible states. If the FW in operational, waiting-for-handshake + * states, driver must take steps to bring it to ready state. Otherwise, it + * has to wait for the ready state. + */ +static int mvumi_handshake(struct mvumi_hba *mhba) +{ + unsigned int hs_state, tmp, hs_fun; + struct mvumi_hs_header *hs_header; + void *regs = mhba->mmio; + + if (mhba->fw_state == FW_STATE_STARTING) + hs_state = HS_S_START; + else { + tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0); + hs_state = HS_GET_STATE(tmp); + dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state); + if (HS_GET_STATUS(tmp) != HS_STATUS_OK) { + mhba->fw_state = FW_STATE_STARTING; + return -1; + } + } + + hs_fun = 0; + switch (hs_state) { + case HS_S_START: + mhba->fw_state = FW_STATE_HANDSHAKING; + HS_SET_STATUS(hs_fun, HS_STATUS_OK); + HS_SET_STATE(hs_fun, HS_S_RESET); + iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1); + iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); + iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); + break; + + case HS_S_RESET: + iowrite32(lower_32_bits(mhba->handshake_page_phys), + regs + CPU_PCIEA_TO_ARM_MSG1); + iowrite32(upper_32_bits(mhba->handshake_page_phys), + regs + CPU_ARM_TO_PCIEA_MSG1); + HS_SET_STATUS(hs_fun, HS_STATUS_OK); + HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR); + iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); + iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); + + break; + + case HS_S_PAGE_ADDR: + case HS_S_QUERY_PAGE: + case HS_S_SEND_PAGE: + hs_header = (struct mvumi_hs_header *) mhba->handshake_page; + if (hs_header->page_code == HS_PAGE_FIRM_CAP) { + mhba->hba_total_pages = + ((struct mvumi_hs_page1 *) hs_header)->total_pages; + + if (mhba->hba_total_pages == 0) + mhba->hba_total_pages = HS_PAGE_TOTAL-1; + } + + if (hs_state == HS_S_QUERY_PAGE) { + if (mvumi_hs_process_page(mhba, hs_header)) { + HS_SET_STATE(hs_fun, HS_S_ABORT); + return -1; + } + if (mvumi_init_data(mhba)) { + HS_SET_STATE(hs_fun, HS_S_ABORT); + return -1; + } + } else if (hs_state == HS_S_PAGE_ADDR) { + hs_header->page_code = 0; + mhba->hba_total_pages = HS_PAGE_TOTAL-1; + } + + if ((hs_header->page_code + 1) <= mhba->hba_total_pages) { + hs_header->page_code++; + if (hs_header->page_code != HS_PAGE_FIRM_CAP) { + mvumi_hs_build_page(mhba, hs_header); + HS_SET_STATE(hs_fun, HS_S_SEND_PAGE); + } else + HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE); + } else + HS_SET_STATE(hs_fun, HS_S_END); + + HS_SET_STATUS(hs_fun, HS_STATUS_OK); + iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0); + iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG); + break; + + case HS_S_END: + /* Set communication list ISR */ + tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG); + tmp |= 
INT_MAP_COMAOUT | INT_MAP_COMAERR; + iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); + iowrite32(mhba->list_num_io, mhba->ib_shadow); + /* Set InBound List Avaliable count shadow */ + iowrite32(lower_32_bits(mhba->ib_shadow_phys), + regs + CLA_INB_AVAL_COUNT_BASEL); + iowrite32(upper_32_bits(mhba->ib_shadow_phys), + regs + CLA_INB_AVAL_COUNT_BASEH); + + /* Set OutBound List Avaliable count shadow */ + iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE, + mhba->ob_shadow); + iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0); + iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4); + + mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; + mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE; + mhba->fw_state = FW_STATE_STARTED; + + break; + default: + dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n", + hs_state); + return -1; + } + return 0; +} + +static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba) +{ + unsigned int isr_status; + unsigned long before; + + before = jiffies; + mvumi_handshake(mhba); + do { + isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio); + + if (mhba->fw_state == FW_STATE_STARTED) + return 0; + if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { + dev_err(&mhba->pdev->dev, + "no handshake response at state 0x%x.\n", + mhba->fw_state); + dev_err(&mhba->pdev->dev, + "isr : global=0x%x,status=0x%x.\n", + mhba->global_isr, isr_status); + return -1; + } + rmb(); + usleep_range(1000, 2000); + } while (!(isr_status & DRBL_HANDSHAKE_ISR)); + + return 0; +} + +static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba) +{ + void *regs = mhba->mmio; + unsigned int tmp; + unsigned long before; + + before = jiffies; + tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); + while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) { + if (tmp != HANDSHAKE_READYSTATE) + iowrite32(DRBL_MU_RESET, + regs + CPU_PCIEA_TO_ARM_DRBL_REG); + if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) { + dev_err(&mhba->pdev->dev, + "invalid signature [0x%x].\n", tmp); + return -1; + } + usleep_range(1000, 2000); + rmb(); + tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1); + } + + mhba->fw_state = FW_STATE_STARTING; + dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n"); + do { + if (mvumi_handshake_event(mhba)) { + dev_err(&mhba->pdev->dev, + "handshake failed at state 0x%x.\n", + mhba->fw_state); + return -1; + } + } while (mhba->fw_state != FW_STATE_STARTED); + + dev_dbg(&mhba->pdev->dev, "firmware handshake done\n"); + + return 0; +} + +static unsigned char mvumi_start(struct mvumi_hba *mhba) +{ + void *regs = mhba->mmio; + unsigned int tmp; + /* clear Door bell */ + tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); + iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG); + + iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); + tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA; + iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG); + if (mvumi_check_handshake(mhba)) + return -1; + + return 0; +} + +/** + * mvumi_complete_cmd - Completes a command + * @mhba: Adapter soft state + * @cmd: Command to be completed + */ +static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd, + struct mvumi_rsp_frame *ob_frame) +{ + struct scsi_cmnd *scmd = cmd->scmd; + + cmd->scmd->SCp.ptr = NULL; + scmd->result = ob_frame->req_status; + + switch (ob_frame->req_status) { + case SAM_STAT_GOOD: + scmd->result |= DID_OK << 16; + break; + case SAM_STAT_BUSY: + scmd->result |= DID_BUS_BUSY << 16; + 
break; + case SAM_STAT_CHECK_CONDITION: + scmd->result |= (DID_OK << 16); + if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) { + memcpy(cmd->scmd->sense_buffer, ob_frame->payload, + sizeof(struct mvumi_sense_data)); + scmd->result |= (DRIVER_SENSE << 24); + } + break; + default: + scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16); + break; + } + + if (scsi_bufflen(scmd)) { + if (scsi_sg_count(scmd)) { + pci_unmap_sg(mhba->pdev, + scsi_sglist(scmd), + scsi_sg_count(scmd), + (int) scmd->sc_data_direction); + } else { + pci_unmap_single(mhba->pdev, + scmd->SCp.dma_handle, + scsi_bufflen(scmd), + (int) scmd->sc_data_direction); + + scmd->SCp.dma_handle = 0; + } + } + cmd->scmd->scsi_done(scmd); + mvumi_return_cmd(mhba, cmd); +} +static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd, + struct mvumi_rsp_frame *ob_frame) +{ + if (atomic_read(&cmd->sync_cmd)) { + cmd->cmd_status = ob_frame->req_status; + + if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) && + (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) && + cmd->data_buf) { + memcpy(cmd->data_buf, ob_frame->payload, + sizeof(struct mvumi_sense_data)); + } + atomic_dec(&cmd->sync_cmd); + wake_up(&mhba->int_cmd_wait_q); + } +} + +static void mvumi_show_event(struct mvumi_hba *mhba, + struct mvumi_driver_event *ptr) +{ + unsigned int i; + + dev_warn(&mhba->pdev->dev, + "Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n", + ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id); + if (ptr->param_count) { + printk(KERN_WARNING "Event param(len 0x%x): ", + ptr->param_count); + for (i = 0; i < ptr->param_count; i++) + printk(KERN_WARNING "0x%x ", ptr->params[i]); + + printk(KERN_WARNING "\n"); + } + + if (ptr->sense_data_length) { + printk(KERN_WARNING "Event sense data(len 0x%x): ", + ptr->sense_data_length); + for (i = 0; i < ptr->sense_data_length; i++) + printk(KERN_WARNING "0x%x ", ptr->sense_data[i]); + printk(KERN_WARNING "\n"); + } +} + +static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer) +{ + if (msg == APICDB1_EVENT_GETEVENT) { + int i, count; + struct mvumi_driver_event *param = NULL; + struct mvumi_event_req *er = buffer; + count = er->count; + if (count > MAX_EVENTS_RETURNED) { + dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger" + " than max event count[0x%x].\n", + count, MAX_EVENTS_RETURNED); + return; + } + for (i = 0; i < count; i++) { + param = &er->events[i]; + mvumi_show_event(mhba, param); + } + } +} + +static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg) +{ + struct mvumi_cmd *cmd; + struct mvumi_msg_frame *frame; + + cmd = mvumi_create_internal_cmd(mhba, 512); + if (!cmd) + return -1; + cmd->scmd = NULL; + cmd->cmd_status = REQ_STATUS_PENDING; + atomic_set(&cmd->sync_cmd, 0); + frame = cmd->frame; + frame->device_id = 0; + frame->cmd_flag = CMD_FLAG_DATA_IN; + frame->req_function = CL_FUN_SCSI_CMD; + frame->cdb_length = MAX_COMMAND_SIZE; + frame->data_transfer_length = sizeof(struct mvumi_event_req); + memset(frame->cdb, 0, MAX_COMMAND_SIZE); + frame->cdb[0] = APICDB0_EVENT; + frame->cdb[1] = msg; + mvumi_issue_blocked_cmd(mhba, cmd); + + if (cmd->cmd_status != SAM_STAT_GOOD) + dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n", + cmd->cmd_status); + else + mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf); + + mvumi_delete_internal_cmd(mhba, cmd); + return 0; +} + +static void mvumi_scan_events(struct work_struct *work) +{ + struct mvumi_events_wq *mu_ev = + container_of(work, struct mvumi_events_wq, 
work_q); + + mvumi_get_event(mu_ev->mhba, mu_ev->event); + kfree(mu_ev); +} + +static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg) +{ + struct mvumi_events_wq *mu_ev; + + mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC); + if (mu_ev) { + INIT_WORK(&mu_ev->work_q, mvumi_scan_events); + mu_ev->mhba = mhba; + mu_ev->event = msg; + mu_ev->param = NULL; + schedule_work(&mu_ev->work_q); + } +} + +static void mvumi_handle_clob(struct mvumi_hba *mhba) +{ + struct mvumi_rsp_frame *ob_frame; + struct mvumi_cmd *cmd; + struct mvumi_ob_data *pool; + + while (!list_empty(&mhba->free_ob_list)) { + pool = list_first_entry(&mhba->free_ob_list, + struct mvumi_ob_data, list); + list_del_init(&pool->list); + list_add_tail(&pool->list, &mhba->ob_data_list); + + ob_frame = (struct mvumi_rsp_frame *) &pool->data[0]; + cmd = mhba->tag_cmd[ob_frame->tag]; + + atomic_dec(&mhba->fw_outstanding); + mhba->tag_cmd[ob_frame->tag] = 0; + tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag); + if (cmd->scmd) + mvumi_complete_cmd(mhba, cmd, ob_frame); + else + mvumi_complete_internal_cmd(mhba, cmd, ob_frame); + } + mhba->instancet->fire_cmd(mhba, NULL); +} + +static irqreturn_t mvumi_isr_handler(int irq, void *devp) +{ + struct mvumi_hba *mhba = (struct mvumi_hba *) devp; + unsigned long flags; + + spin_lock_irqsave(mhba->shost->host_lock, flags); + if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) { + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + return IRQ_NONE; + } + + if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) { + if (mhba->isr_status & DRBL_HANDSHAKE_ISR) { + dev_warn(&mhba->pdev->dev, "enter handshake again!\n"); + mvumi_handshake(mhba); + } + if (mhba->isr_status & DRBL_EVENT_NOTIFY) + mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT); + } + + if (mhba->global_isr & INT_MAP_COMAOUT) + mvumi_receive_ob_list_entry(mhba); + + mhba->global_isr = 0; + mhba->isr_status = 0; + if (mhba->fw_state == FW_STATE_STARTED) + mvumi_handle_clob(mhba); + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + return IRQ_HANDLED; +} + +static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba, + struct mvumi_cmd *cmd) +{ + void *ib_entry; + struct mvumi_msg_frame *ib_frame; + unsigned int frame_len; + + ib_frame = cmd->frame; + if (unlikely(mhba->fw_state != FW_STATE_STARTED)) { + dev_dbg(&mhba->pdev->dev, "firmware not ready.\n"); + return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; + } + if (tag_is_empty(&mhba->tag_pool)) { + dev_dbg(&mhba->pdev->dev, "no free tag.\n"); + return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; + } + if (mvumi_get_ib_list_entry(mhba, &ib_entry)) + return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE; + + cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool); + cmd->frame->request_id = mhba->io_seq++; + cmd->request_id = cmd->frame->request_id; + mhba->tag_cmd[cmd->frame->tag] = cmd; + frame_len = sizeof(*ib_frame) - 4 + + ib_frame->sg_counts * sizeof(struct mvumi_sgl); + memcpy(ib_entry, ib_frame, frame_len); + return MV_QUEUE_COMMAND_RESULT_SENT; +} + +static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd) +{ + unsigned short num_of_cl_sent = 0; + enum mvumi_qc_result result; + + if (cmd) + list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list); + + while (!list_empty(&mhba->waiting_req_list)) { + cmd = list_first_entry(&mhba->waiting_req_list, + struct mvumi_cmd, queue_pointer); + list_del_init(&cmd->queue_pointer); + result = mvumi_send_command(mhba, cmd); + switch (result) { + case MV_QUEUE_COMMAND_RESULT_SENT: + num_of_cl_sent++; + break; + case 
MV_QUEUE_COMMAND_RESULT_NO_RESOURCE: + list_add(&cmd->queue_pointer, &mhba->waiting_req_list); + if (num_of_cl_sent > 0) + mvumi_send_ib_list_entry(mhba); + + return; + } + } + if (num_of_cl_sent > 0) + mvumi_send_ib_list_entry(mhba); +} + +/** + * mvumi_enable_intr - Enables interrupts + * @regs: FW register set + */ +static void mvumi_enable_intr(void *regs) +{ + unsigned int mask; + + iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG); + mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); + mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR; + iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); +} + +/** + * mvumi_disable_intr -Disables interrupt + * @regs: FW register set + */ +static void mvumi_disable_intr(void *regs) +{ + unsigned int mask; + + iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG); + mask = ioread32(regs + CPU_ENPOINTA_MASK_REG); + mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR); + iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG); +} + +static int mvumi_clear_intr(void *extend) +{ + struct mvumi_hba *mhba = (struct mvumi_hba *) extend; + unsigned int status, isr_status = 0, tmp = 0; + void *regs = mhba->mmio; + + status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG); + if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF) + return 1; + if (unlikely(status & INT_MAP_COMAERR)) { + tmp = ioread32(regs + CLA_ISR_CAUSE); + if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ)) + iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ), + regs + CLA_ISR_CAUSE); + status ^= INT_MAP_COMAERR; + /* inbound or outbound parity error, command will timeout */ + } + if (status & INT_MAP_COMAOUT) { + tmp = ioread32(regs + CLA_ISR_CAUSE); + if (tmp & CLIC_OUT_IRQ) + iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE); + } + if (status & INT_MAP_DL_CPU2PCIEA) { + isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); + if (isr_status) + iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); + } + + mhba->global_isr = status; + mhba->isr_status = isr_status; + + return 0; +} + +/** + * mvumi_read_fw_status_reg - returns the current FW status value + * @regs: FW register set + */ +static unsigned int mvumi_read_fw_status_reg(void *regs) +{ + unsigned int status; + + status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG); + if (status) + iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG); + return status; +} + +static struct mvumi_instance_template mvumi_instance_template = { + .fire_cmd = mvumi_fire_cmd, + .enable_intr = mvumi_enable_intr, + .disable_intr = mvumi_disable_intr, + .clear_intr = mvumi_clear_intr, + .read_fw_status_reg = mvumi_read_fw_status_reg, +}; + +static int mvumi_slave_configure(struct scsi_device *sdev) +{ + struct mvumi_hba *mhba; + unsigned char bitcount = sizeof(unsigned char) * 8; + + mhba = (struct mvumi_hba *) sdev->host->hostdata; + if (sdev->id >= mhba->max_target_id) + return -EINVAL; + + mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount)); + return 0; +} + +/** + * mvumi_build_frame - Prepares a direct cdb (DCDB) command + * @mhba: Adapter soft state + * @scmd: SCSI command + * @cmd: Command to be prepared in + * + * This function prepares CDB commands. These are typcially pass-through + * commands to the devices. 
+ */ +static unsigned char mvumi_build_frame(struct mvumi_hba *mhba, + struct scsi_cmnd *scmd, struct mvumi_cmd *cmd) +{ + struct mvumi_msg_frame *pframe; + + cmd->scmd = scmd; + cmd->cmd_status = REQ_STATUS_PENDING; + pframe = cmd->frame; + pframe->device_id = ((unsigned short) scmd->device->id) | + (((unsigned short) scmd->device->lun) << 8); + pframe->cmd_flag = 0; + + switch (scmd->sc_data_direction) { + case DMA_NONE: + pframe->cmd_flag |= CMD_FLAG_NON_DATA; + break; + case DMA_FROM_DEVICE: + pframe->cmd_flag |= CMD_FLAG_DATA_IN; + break; + case DMA_TO_DEVICE: + pframe->cmd_flag |= CMD_FLAG_DATA_OUT; + break; + case DMA_BIDIRECTIONAL: + default: + dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] " + "cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]); + goto error; + } + + pframe->cdb_length = scmd->cmd_len; + memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length); + pframe->req_function = CL_FUN_SCSI_CMD; + if (scsi_bufflen(scmd)) { + if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0], + &pframe->sg_counts)) + goto error; + + pframe->data_transfer_length = scsi_bufflen(scmd); + } else { + pframe->sg_counts = 0; + pframe->data_transfer_length = 0; + } + return 0; + +error: + scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) | + SAM_STAT_CHECK_CONDITION; + scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24, + 0); + return -1; +} + +/** + * mvumi_queue_command - Queue entry point + * @scmd: SCSI command to be queued + * @done: Callback entry point + */ +static int mvumi_queue_command(struct Scsi_Host *shost, + struct scsi_cmnd *scmd) +{ + struct mvumi_cmd *cmd; + struct mvumi_hba *mhba; + unsigned long irq_flags; + + spin_lock_irqsave(shost->host_lock, irq_flags); + scsi_cmd_get_serial(shost, scmd); + + mhba = (struct mvumi_hba *) shost->hostdata; + scmd->result = 0; + cmd = mvumi_get_cmd(mhba); + if (unlikely(!cmd)) { + spin_unlock_irqrestore(shost->host_lock, irq_flags); + return SCSI_MLQUEUE_HOST_BUSY; + } + + if (unlikely(mvumi_build_frame(mhba, scmd, cmd))) + goto out_return_cmd; + + cmd->scmd = scmd; + scmd->SCp.ptr = (char *) cmd; + mhba->instancet->fire_cmd(mhba, cmd); + spin_unlock_irqrestore(shost->host_lock, irq_flags); + return 0; + +out_return_cmd: + mvumi_return_cmd(mhba, cmd); + scmd->scsi_done(scmd); + spin_unlock_irqrestore(shost->host_lock, irq_flags); + return 0; +} + +static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd) +{ + struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr; + struct Scsi_Host *host = scmd->device->host; + struct mvumi_hba *mhba = shost_priv(host); + unsigned long flags; + + spin_lock_irqsave(mhba->shost->host_lock, flags); + + if (mhba->tag_cmd[cmd->frame->tag]) { + mhba->tag_cmd[cmd->frame->tag] = 0; + tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag); + } + if (!list_empty(&cmd->queue_pointer)) + list_del_init(&cmd->queue_pointer); + else + atomic_dec(&mhba->fw_outstanding); + + scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16); + scmd->SCp.ptr = NULL; + if (scsi_bufflen(scmd)) { + if (scsi_sg_count(scmd)) { + pci_unmap_sg(mhba->pdev, + scsi_sglist(scmd), + scsi_sg_count(scmd), + (int)scmd->sc_data_direction); + } else { + pci_unmap_single(mhba->pdev, + scmd->SCp.dma_handle, + scsi_bufflen(scmd), + (int)scmd->sc_data_direction); + + scmd->SCp.dma_handle = 0; + } + } + mvumi_return_cmd(mhba, cmd); + spin_unlock_irqrestore(mhba->shost->host_lock, flags); + + return BLK_EH_NOT_HANDLED; +} + +static int +mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev, + 
sector_t capacity, int geom[]) +{ + int heads, sectors; + sector_t cylinders; + unsigned long tmp; + + heads = 64; + sectors = 32; + tmp = heads * sectors; + cylinders = capacity; + sector_div(cylinders, tmp); + + if (capacity >= 0x200000) { + heads = 255; + sectors = 63; + tmp = heads * sectors; + cylinders = capacity; + sector_div(cylinders, tmp); + } + geom[0] = heads; + geom[1] = sectors; + geom[2] = cylinders; + + return 0; +} + +static struct scsi_host_template mvumi_template = { + + .module = THIS_MODULE, + .name = "Marvell Storage Controller", + .slave_configure = mvumi_slave_configure, + .queuecommand = mvumi_queue_command, + .eh_host_reset_handler = mvumi_host_reset, + .bios_param = mvumi_bios_param, + .this_id = -1, +}; + +static struct scsi_transport_template mvumi_transport_template = { + .eh_timed_out = mvumi_timed_out, +}; + +/** + * mvumi_init_fw - Initializes the FW + * @mhba: Adapter soft state + * + * This is the main function for initializing firmware. + */ +static int mvumi_init_fw(struct mvumi_hba *mhba) +{ + int ret = 0; + + if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) { + dev_err(&mhba->pdev->dev, "IO memory region busy!\n"); + return -EBUSY; + } + ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); + if (ret) + goto fail_ioremap; + + mhba->mmio = mhba->base_addr[0]; + + switch (mhba->pdev->device) { + case PCI_DEVICE_ID_MARVELL_MV9143: + mhba->instancet = &mvumi_instance_template; + mhba->io_seq = 0; + mhba->max_sge = MVUMI_MAX_SG_ENTRY; + mhba->request_id_enabled = 1; + break; + default: + dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n", + mhba->pdev->device); + mhba->instancet = NULL; + ret = -EINVAL; + goto fail_alloc_mem; + } + dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n", + mhba->pdev->device); + + mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL); + if (!mhba->handshake_page) { + dev_err(&mhba->pdev->dev, + "failed to allocate memory for handshake\n"); + ret = -ENOMEM; + goto fail_alloc_mem; + } + mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page); + + if (mvumi_start(mhba)) { + ret = -EINVAL; + goto fail_ready_state; + } + ret = mvumi_alloc_cmds(mhba); + if (ret) + goto fail_ready_state; + + return 0; + +fail_ready_state: + mvumi_release_mem_resource(mhba); + kfree(mhba->handshake_page); +fail_alloc_mem: + mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr); +fail_ioremap: + pci_release_regions(mhba->pdev); + + return ret; +} + +/** + * mvumi_io_attach - Attaches this driver to SCSI mid-layer + * @mhba: Adapter soft state + */ +static int mvumi_io_attach(struct mvumi_hba *mhba) +{ + struct Scsi_Host *host = mhba->shost; + int ret; + unsigned int max_sg = (mhba->ib_max_size + 4 - + sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl); + + host->irq = mhba->pdev->irq; + host->unique_id = mhba->unique_id; + host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1; + host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge; + host->max_sectors = mhba->max_transfer_size / 512; + host->cmd_per_lun = (mhba->max_io - 1) ? 
(mhba->max_io - 1) : 1; + host->max_id = mhba->max_target_id; + host->max_cmd_len = MAX_COMMAND_SIZE; + host->transportt = &mvumi_transport_template; + + ret = scsi_add_host(host, &mhba->pdev->dev); + if (ret) { + dev_err(&mhba->pdev->dev, "scsi_add_host failed\n"); + return ret; + } + mhba->fw_flag |= MVUMI_FW_ATTACH; + scsi_scan_host(host); + + return 0; +} + +/** + * mvumi_probe_one - PCI hotplug entry point + * @pdev: PCI device structure + * @id: PCI ids of supported hotplugged adapter + */ +static int __devinit mvumi_probe_one(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct Scsi_Host *host; + struct mvumi_hba *mhba; + int ret; + + dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ", + pdev->vendor, pdev->device, pdev->subsystem_vendor, + pdev->subsystem_device); + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + pci_set_master(pdev); + + if (IS_DMA64) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + goto fail_set_dma_mask; + } + } else { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + goto fail_set_dma_mask; + } + + host = scsi_host_alloc(&mvumi_template, sizeof(*mhba)); + if (!host) { + dev_err(&pdev->dev, "scsi_host_alloc failed\n"); + ret = -ENOMEM; + goto fail_alloc_instance; + } + mhba = shost_priv(host); + + INIT_LIST_HEAD(&mhba->cmd_pool); + INIT_LIST_HEAD(&mhba->ob_data_list); + INIT_LIST_HEAD(&mhba->free_ob_list); + INIT_LIST_HEAD(&mhba->res_list); + INIT_LIST_HEAD(&mhba->waiting_req_list); + atomic_set(&mhba->fw_outstanding, 0); + init_waitqueue_head(&mhba->int_cmd_wait_q); + + mhba->pdev = pdev; + mhba->shost = host; + mhba->unique_id = pdev->bus->number << 8 | pdev->devfn; + + ret = mvumi_init_fw(mhba); + if (ret) + goto fail_init_fw; + + ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, + "mvumi", mhba); + if (ret) { + dev_err(&pdev->dev, "failed to register IRQ\n"); + goto fail_init_irq; + } + mhba->instancet->enable_intr(mhba->mmio); + pci_set_drvdata(pdev, mhba); + + ret = mvumi_io_attach(mhba); + if (ret) + goto fail_io_attach; + dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n"); + + return 0; + +fail_io_attach: + pci_set_drvdata(pdev, NULL); + mhba->instancet->disable_intr(mhba->mmio); + free_irq(mhba->pdev->irq, mhba); +fail_init_irq: + mvumi_release_fw(mhba); +fail_init_fw: + scsi_host_put(host); + +fail_alloc_instance: +fail_set_dma_mask: + pci_disable_device(pdev); + + return ret; +} + +static void mvumi_detach_one(struct pci_dev *pdev) +{ + struct Scsi_Host *host; + struct mvumi_hba *mhba; + + mhba = pci_get_drvdata(pdev); + host = mhba->shost; + scsi_remove_host(mhba->shost); + mvumi_flush_cache(mhba); + + mhba->instancet->disable_intr(mhba->mmio); + free_irq(mhba->pdev->irq, mhba); + mvumi_release_fw(mhba); + scsi_host_put(host); + pci_set_drvdata(pdev, NULL); + pci_disable_device(pdev); + dev_dbg(&pdev->dev, "driver is removed!\n"); +} + +/** + * mvumi_shutdown - Shutdown entry point + * @device: Generic device structure + */ +static void mvumi_shutdown(struct pci_dev *pdev) +{ + struct mvumi_hba *mhba = pci_get_drvdata(pdev); + + mvumi_flush_cache(mhba); +} + +static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct mvumi_hba *mhba = NULL; + + mhba = pci_get_drvdata(pdev); + mvumi_flush_cache(mhba); + + pci_set_drvdata(pdev, mhba); + mhba->instancet->disable_intr(mhba->mmio); + free_irq(mhba->pdev->irq, mhba); + mvumi_unmap_pci_addr(pdev, mhba->base_addr); + 
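/* the IRQ, mapping and regions torn down here are reacquired in mvumi_resume() */
+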
pci_release_regions(pdev); + pci_save_state(pdev); + pci_disable_device(pdev); + pci_set_power_state(pdev, pci_choose_state(pdev, state)); + + return 0; +} + +static int mvumi_resume(struct pci_dev *pdev) +{ + int ret; + struct mvumi_hba *mhba = NULL; + + mhba = pci_get_drvdata(pdev); + + pci_set_power_state(pdev, PCI_D0); + pci_enable_wake(pdev, PCI_D0, 0); + pci_restore_state(pdev); + + ret = pci_enable_device(pdev); + if (ret) { + dev_err(&pdev->dev, "enable device failed\n"); + return ret; + } + pci_set_master(pdev); + if (IS_DMA64) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + goto fail; + } + } else { + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (ret) + goto fail; + } + ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME); + if (ret) + goto fail; + ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr); + if (ret) + goto release_regions; + + mhba->mmio = mhba->base_addr[0]; + mvumi_reset(mhba->mmio); + + if (mvumi_start(mhba)) { + ret = -EINVAL; + goto unmap_pci_addr; + } + + ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED, + "mvumi", mhba); + if (ret) { + dev_err(&pdev->dev, "failed to register IRQ\n"); + goto unmap_pci_addr; + } + mhba->instancet->enable_intr(mhba->mmio); + + return 0; + +unmap_pci_addr: + mvumi_unmap_pci_addr(pdev, mhba->base_addr); +release_regions: + pci_release_regions(pdev); +fail: + pci_disable_device(pdev); + + return ret; +} + +static struct pci_driver mvumi_pci_driver = { + + .name = MV_DRIVER_NAME, + .id_table = mvumi_pci_table, + .probe = mvumi_probe_one, + .remove = __devexit_p(mvumi_detach_one), + .shutdown = mvumi_shutdown, +#ifdef CONFIG_PM + .suspend = mvumi_suspend, + .resume = mvumi_resume, +#endif +}; + +/** + * mvumi_init - Driver load entry point + */ +static int __init mvumi_init(void) +{ + return pci_register_driver(&mvumi_pci_driver); +} + +/** + * mvumi_exit - Driver unload entry point + */ +static void __exit mvumi_exit(void) +{ + + pci_unregister_driver(&mvumi_pci_driver); +} + +module_init(mvumi_init); +module_exit(mvumi_exit); diff --git a/drivers/scsi/mvumi.h b/drivers/scsi/mvumi.h new file mode 100644 index 000000000000..10b9237566f0 --- /dev/null +++ b/drivers/scsi/mvumi.h @@ -0,0 +1,505 @@ +/* + * Marvell UMI head file + * + * Copyright 2011 Marvell. <jyli@marvell.com> + * + * This file is licensed under GPLv2. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; version 2 of the + * License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 + * USA + */ + +#ifndef MVUMI_H +#define MVUMI_H + +#define MAX_BASE_ADDRESS 6 + +#define VER_MAJOR 1 +#define VER_MINOR 1 +#define VER_OEM 0 +#define VER_BUILD 1500 + +#define MV_DRIVER_NAME "mvumi" +#define PCI_VENDOR_ID_MARVELL_2 0x1b4b +#define PCI_DEVICE_ID_MARVELL_MV9143 0x9143 + +#define MVUMI_INTERNAL_CMD_WAIT_TIME 45 + +#define IS_DMA64 (sizeof(dma_addr_t) == 8) + +enum mvumi_qc_result { + MV_QUEUE_COMMAND_RESULT_SENT = 0, + MV_QUEUE_COMMAND_RESULT_NO_RESOURCE, +}; + +enum { + /*******************************************/ + + /* ARM Mbus Registers Map */ + + /*******************************************/ + CPU_MAIN_INT_CAUSE_REG = 0x20200, + CPU_MAIN_IRQ_MASK_REG = 0x20204, + CPU_MAIN_FIQ_MASK_REG = 0x20208, + CPU_ENPOINTA_MASK_REG = 0x2020C, + CPU_ENPOINTB_MASK_REG = 0x20210, + + INT_MAP_COMAERR = 1 << 6, + INT_MAP_COMAIN = 1 << 7, + INT_MAP_COMAOUT = 1 << 8, + INT_MAP_COMBERR = 1 << 9, + INT_MAP_COMBIN = 1 << 10, + INT_MAP_COMBOUT = 1 << 11, + + INT_MAP_COMAINT = (INT_MAP_COMAOUT | INT_MAP_COMAERR), + INT_MAP_COMBINT = (INT_MAP_COMBOUT | INT_MAP_COMBIN | INT_MAP_COMBERR), + + INT_MAP_DL_PCIEA2CPU = 1 << 0, + INT_MAP_DL_CPU2PCIEA = 1 << 1, + + /***************************************/ + + /* ARM Doorbell Registers Map */ + + /***************************************/ + CPU_PCIEA_TO_ARM_DRBL_REG = 0x20400, + CPU_PCIEA_TO_ARM_MASK_REG = 0x20404, + CPU_ARM_TO_PCIEA_DRBL_REG = 0x20408, + CPU_ARM_TO_PCIEA_MASK_REG = 0x2040C, + + DRBL_HANDSHAKE = 1 << 0, + DRBL_SOFT_RESET = 1 << 1, + DRBL_BUS_CHANGE = 1 << 2, + DRBL_EVENT_NOTIFY = 1 << 3, + DRBL_MU_RESET = 1 << 4, + DRBL_HANDSHAKE_ISR = DRBL_HANDSHAKE, + + CPU_PCIEA_TO_ARM_MSG0 = 0x20430, + CPU_PCIEA_TO_ARM_MSG1 = 0x20434, + CPU_ARM_TO_PCIEA_MSG0 = 0x20438, + CPU_ARM_TO_PCIEA_MSG1 = 0x2043C, + + /*******************************************/ + + /* ARM Communication List Registers Map */ + + /*******************************************/ + CLA_INB_LIST_BASEL = 0x500, + CLA_INB_LIST_BASEH = 0x504, + CLA_INB_AVAL_COUNT_BASEL = 0x508, + CLA_INB_AVAL_COUNT_BASEH = 0x50C, + CLA_INB_DESTI_LIST_BASEL = 0x510, + CLA_INB_DESTI_LIST_BASEH = 0x514, + CLA_INB_WRITE_POINTER = 0x518, + CLA_INB_READ_POINTER = 0x51C, + + CLA_OUTB_LIST_BASEL = 0x530, + CLA_OUTB_LIST_BASEH = 0x534, + CLA_OUTB_SOURCE_LIST_BASEL = 0x538, + CLA_OUTB_SOURCE_LIST_BASEH = 0x53C, + CLA_OUTB_COPY_POINTER = 0x544, + CLA_OUTB_READ_POINTER = 0x548, + + CLA_ISR_CAUSE = 0x560, + CLA_ISR_MASK = 0x564, + + INT_MAP_MU = (INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAINT), + + CL_POINTER_TOGGLE = 1 << 12, + + CLIC_IN_IRQ = 1 << 0, + CLIC_OUT_IRQ = 1 << 1, + CLIC_IN_ERR_IRQ = 1 << 8, + CLIC_OUT_ERR_IRQ = 1 << 12, + + CL_SLOT_NUM_MASK = 0xFFF, + + /* + * Command flag is the flag for the CDB command itself + */ + /* 1-non data; 0-data command */ + CMD_FLAG_NON_DATA = 1 << 0, + CMD_FLAG_DMA = 1 << 1, + CMD_FLAG_PIO = 1 << 2, + /* 1-host read data */ + CMD_FLAG_DATA_IN = 1 << 3, + /* 1-host write data */ + CMD_FLAG_DATA_OUT = 1 << 4, + + SCSI_CMD_MARVELL_SPECIFIC = 0xE1, + CDB_CORE_SHUTDOWN = 0xB, +}; + +#define APICDB0_EVENT 0xF4 +#define APICDB1_EVENT_GETEVENT 0 +#define MAX_EVENTS_RETURNED 6 + +struct mvumi_driver_event { + u32 time_stamp; + u32 sequence_no; + u32 event_id; + u8 severity; + u8 param_count; + u16 device_id; + u32 params[4]; + u8 sense_data_length; + u8 Reserved1; + u8 
sense_data[30]; +}; + +struct mvumi_event_req { + unsigned char count; + unsigned char reserved[3]; + struct mvumi_driver_event events[MAX_EVENTS_RETURNED]; +}; + +struct mvumi_events_wq { + struct work_struct work_q; + struct mvumi_hba *mhba; + unsigned int event; + void *param; +}; + +#define MVUMI_MAX_SG_ENTRY 32 +#define SGD_EOT (1L << 27) + +struct mvumi_sgl { + u32 baseaddr_l; + u32 baseaddr_h; + u32 flags; + u32 size; +}; + +struct mvumi_res { + struct list_head entry; + dma_addr_t bus_addr; + void *virt_addr; + unsigned int size; + unsigned short type; /* enum Resource_Type */ +}; + +/* Resource type */ +enum resource_type { + RESOURCE_CACHED_MEMORY = 0, + RESOURCE_UNCACHED_MEMORY +}; + +struct mvumi_sense_data { + u8 error_eode:7; + u8 valid:1; + u8 segment_number; + u8 sense_key:4; + u8 reserved:1; + u8 incorrect_length:1; + u8 end_of_media:1; + u8 file_mark:1; + u8 information[4]; + u8 additional_sense_length; + u8 command_specific_information[4]; + u8 additional_sense_code; + u8 additional_sense_code_qualifier; + u8 field_replaceable_unit_code; + u8 sense_key_specific[3]; +}; + +/* Request initiator must set the status to REQ_STATUS_PENDING. */ +#define REQ_STATUS_PENDING 0x80 + +struct mvumi_cmd { + struct list_head queue_pointer; + struct mvumi_msg_frame *frame; + struct scsi_cmnd *scmd; + atomic_t sync_cmd; + void *data_buf; + unsigned short request_id; + unsigned char cmd_status; +}; + +/* + * the function type of the in bound frame + */ +#define CL_FUN_SCSI_CMD 0x1 + +struct mvumi_msg_frame { + u16 device_id; + u16 tag; + u8 cmd_flag; + u8 req_function; + u8 cdb_length; + u8 sg_counts; + u32 data_transfer_length; + u16 request_id; + u16 reserved1; + u8 cdb[MAX_COMMAND_SIZE]; + u32 payload[1]; +}; + +/* + * the respond flag for data_payload of the out bound frame + */ +#define CL_RSP_FLAG_NODATA 0x0 +#define CL_RSP_FLAG_SENSEDATA 0x1 + +struct mvumi_rsp_frame { + u16 device_id; + u16 tag; + u8 req_status; + u8 rsp_flag; /* Indicates the type of Data_Payload.*/ + u16 request_id; + u32 payload[1]; +}; + +struct mvumi_ob_data { + struct list_head list; + unsigned char data[0]; +}; + +struct version_info { + u32 ver_major; + u32 ver_minor; + u32 ver_oem; + u32 ver_build; +}; + +#define FW_MAX_DELAY 30 +#define MVUMI_FW_BUSY (1U << 0) +#define MVUMI_FW_ATTACH (1U << 1) +#define MVUMI_FW_ALLOC (1U << 2) + +/* + * State is the state of the MU + */ +#define FW_STATE_IDLE 0 +#define FW_STATE_STARTING 1 +#define FW_STATE_HANDSHAKING 2 +#define FW_STATE_STARTED 3 +#define FW_STATE_ABORT 4 + +#define HANDSHAKE_SIGNATURE 0x5A5A5A5AL +#define HANDSHAKE_READYSTATE 0x55AA5AA5L +#define HANDSHAKE_DONESTATE 0x55AAA55AL + +/* HandShake Status definition */ +#define HS_STATUS_OK 1 +#define HS_STATUS_ERR 2 +#define HS_STATUS_INVALID 3 + +/* HandShake State/Cmd definition */ +#define HS_S_START 1 +#define HS_S_RESET 2 +#define HS_S_PAGE_ADDR 3 +#define HS_S_QUERY_PAGE 4 +#define HS_S_SEND_PAGE 5 +#define HS_S_END 6 +#define HS_S_ABORT 7 +#define HS_PAGE_VERIFY_SIZE 128 + +#define HS_GET_STATE(a) (a & 0xFFFF) +#define HS_GET_STATUS(a) ((a & 0xFFFF0000) >> 16) +#define HS_SET_STATE(a, b) (a |= (b & 0xFFFF)) +#define HS_SET_STATUS(a, b) (a |= ((b & 0xFFFF) << 16)) + +/* handshake frame */ +struct mvumi_hs_frame { + u16 size; + /* host information */ + u8 host_type; + u8 reserved_1[1]; + struct version_info host_ver; /* bios or driver version */ + + /* controller information */ + u32 system_io_bus; + u32 slot_number; + u32 intr_level; + u32 intr_vector; + + /* communication list configuration */ + 
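/* inbound (ib_*) and outbound (ob_*) list DMA bases, split into 32-bit halves */
+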
u32 ib_baseaddr_l; + u32 ib_baseaddr_h; + u32 ob_baseaddr_l; + u32 ob_baseaddr_h; + + u8 ib_entry_size; + u8 ob_entry_size; + u8 ob_depth; + u8 ib_depth; + + /* system time */ + u64 seconds_since1970; +}; + +struct mvumi_hs_header { + u8 page_code; + u8 checksum; + u16 frame_length; + u32 frame_content[1]; +}; + +/* + * the page code type of the handshake header + */ +#define HS_PAGE_FIRM_CAP 0x1 +#define HS_PAGE_HOST_INFO 0x2 +#define HS_PAGE_FIRM_CTL 0x3 +#define HS_PAGE_CL_INFO 0x4 +#define HS_PAGE_TOTAL 0x5 + +#define HSP_SIZE(i) sizeof(struct mvumi_hs_page##i) + +#define HSP_MAX_SIZE ({ \ + int size, m1, m2; \ + m1 = max(HSP_SIZE(1), HSP_SIZE(3)); \ + m2 = max(HSP_SIZE(2), HSP_SIZE(4)); \ + size = max(m1, m2); \ + size; \ +}) + +/* The format of the page code for Firmware capability */ +struct mvumi_hs_page1 { + u8 pagecode; + u8 checksum; + u16 frame_length; + + u16 number_of_ports; + u16 max_devices_support; + u16 max_io_support; + u16 umi_ver; + u32 max_transfer_size; + struct version_info fw_ver; + u8 cl_in_max_entry_size; + u8 cl_out_max_entry_size; + u8 cl_inout_list_depth; + u8 total_pages; + u16 capability; + u16 reserved1; +}; + +/* The format of the page code for Host information */ +struct mvumi_hs_page2 { + u8 pagecode; + u8 checksum; + u16 frame_length; + + u8 host_type; + u8 reserved[3]; + struct version_info host_ver; + u32 system_io_bus; + u32 slot_number; + u32 intr_level; + u32 intr_vector; + u64 seconds_since1970; +}; + +/* The format of the page code for firmware control */ +struct mvumi_hs_page3 { + u8 pagecode; + u8 checksum; + u16 frame_length; + u16 control; + u8 reserved[2]; + u32 host_bufferaddr_l; + u32 host_bufferaddr_h; + u32 host_eventaddr_l; + u32 host_eventaddr_h; +}; + +struct mvumi_hs_page4 { + u8 pagecode; + u8 checksum; + u16 frame_length; + u32 ib_baseaddr_l; + u32 ib_baseaddr_h; + u32 ob_baseaddr_l; + u32 ob_baseaddr_h; + u8 ib_entry_size; + u8 ob_entry_size; + u8 ob_depth; + u8 ib_depth; +}; + +struct mvumi_tag { + unsigned short *stack; + unsigned short top; + unsigned short size; +}; + +struct mvumi_hba { + void *base_addr[MAX_BASE_ADDRESS]; + void *mmio; + struct list_head cmd_pool; + struct Scsi_Host *shost; + wait_queue_head_t int_cmd_wait_q; + struct pci_dev *pdev; + unsigned int unique_id; + atomic_t fw_outstanding; + struct mvumi_instance_template *instancet; + + void *ib_list; + dma_addr_t ib_list_phys; + + void *ob_list; + dma_addr_t ob_list_phys; + + void *ib_shadow; + dma_addr_t ib_shadow_phys; + + void *ob_shadow; + dma_addr_t ob_shadow_phys; + + void *handshake_page; + dma_addr_t handshake_page_phys; + + unsigned int global_isr; + unsigned int isr_status; + + unsigned short max_sge; + unsigned short max_target_id; + unsigned char *target_map; + unsigned int max_io; + unsigned int list_num_io; + unsigned int ib_max_size; + unsigned int ob_max_size; + unsigned int ib_max_size_setting; + unsigned int ob_max_size_setting; + unsigned int max_transfer_size; + unsigned char hba_total_pages; + unsigned char fw_flag; + unsigned char request_id_enabled; + unsigned short hba_capability; + unsigned short io_seq; + + unsigned int ib_cur_slot; + unsigned int ob_cur_slot; + unsigned int fw_state; + + struct list_head ob_data_list; + struct list_head free_ob_list; + struct list_head res_list; + struct list_head waiting_req_list; + + struct mvumi_tag tag_pool; + struct mvumi_cmd **tag_cmd; +}; + +struct mvumi_instance_template { + void (*fire_cmd)(struct mvumi_hba *, struct mvumi_cmd *); + void (*enable_intr)(void *) ; + void (*disable_intr)(void 
*); + int (*clear_intr)(void *); + unsigned int (*read_fw_status_reg)(void *); +}; + +extern struct timezone sys_tz; +#endif diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 8b7db1e53c10..b7b92f7be2aa 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -567,11 +567,11 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) value = pm8001_cr32(pm8001_ha, 0, 0x44); offset = value & 0x03FFFFFF; PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("Scratchpad 0 Offset: %x \n", offset)); + pm8001_printk("Scratchpad 0 Offset: %x\n", offset)); pcilogic = (value & 0xFC000000) >> 26; pcibar = get_pci_bar_index(pcilogic); PM8001_INIT_DBG(pm8001_ha, - pm8001_printk("Scratchpad 0 PCI BAR: %d \n", pcibar)); + pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar)); pm8001_ha->main_cfg_tbl_addr = base_addr = pm8001_ha->io_mem[pcibar].memvirtaddr + offset; pm8001_ha->general_stat_tbl_addr = @@ -1245,7 +1245,7 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) { PM8001_IO_DBG(pm8001_ha, - pm8001_printk("No free mpi buffer \n")); + pm8001_printk("No free mpi buffer\n")); return -1; } BUG_ON(!payload); @@ -1262,7 +1262,7 @@ static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha, pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar, circularQ->pi_offset, circularQ->producer_idx); PM8001_IO_DBG(pm8001_ha, - pm8001_printk("after PI= %d CI= %d \n", circularQ->producer_idx, + pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx, circularQ->consumer_index)); return 0; } @@ -1474,7 +1474,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS" - ",param = %d \n", param)); + ",param = %d\n", param)); if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; @@ -1490,14 +1490,14 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) break; case IO_ABORTED: PM8001_IO_DBG(pm8001_ha, - pm8001_printk("IO_ABORTED IOMB Tag \n")); + pm8001_printk("IO_ABORTED IOMB Tag\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_ABORTED_TASK; break; case IO_UNDERFLOW: /* SSP Completion with error */ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW" - ",param = %d \n", param)); + ",param = %d\n", param)); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_UNDERRUN; ts->residual = param; @@ -1649,6 +1649,7 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb) ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; + break; default: PM8001_IO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", status)); @@ -1937,14 +1938,14 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->buf_valid_size = sizeof(*resp); } else PM8001_IO_DBG(pm8001_ha, - pm8001_printk("response to large \n")); + pm8001_printk("response to large\n")); } if (pm8001_dev) pm8001_dev->running_req--; break; case IO_ABORTED: PM8001_IO_DBG(pm8001_ha, - pm8001_printk("IO_ABORTED IOMB Tag \n")); + pm8001_printk("IO_ABORTED IOMB Tag\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_ABORTED_TASK; if (pm8001_dev) @@ -2728,11 +2729,11 @@ static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS; if (status != 0) { PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("%x phy execute %x phy op failed! 
\n", + pm8001_printk("%x phy execute %x phy op failed!\n", phy_id, phy_op)); } else PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("%x phy execute %x phy op success! \n", + pm8001_printk("%x phy execute %x phy op success!\n", phy_id, phy_op)); return 0; } @@ -3018,7 +3019,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) break; case PORT_INVALID: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk(" PortInvalid portID %d \n", port_id)); + pm8001_printk(" PortInvalid portID %d\n", port_id)); PM8001_MSG_DBG(pm8001_ha, pm8001_printk(" Last phy Down and port invalid\n")); port->port_attached = 0; @@ -3027,7 +3028,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) break; case PORT_IN_RESET: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk(" Port In Reset portID %d \n", port_id)); + pm8001_printk(" Port In Reset portID %d\n", port_id)); break; case PORT_NOT_ESTABLISHED: PM8001_MSG_DBG(pm8001_ha, @@ -3220,7 +3221,7 @@ mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) pm8001_printk(" status = 0x%x\n", status)); for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++) PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("inb_IOMB_payload[0x%x] 0x%x, \n", i, + pm8001_printk("inb_IOMB_payload[0x%x] 0x%x,\n", i, pPayload->inb_IOMB_payload[i])); return 0; } @@ -3312,12 +3313,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) break; case HW_EVENT_SAS_PHY_UP: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_PHY_START_STATUS \n")); + pm8001_printk("HW_EVENT_PHY_START_STATUS\n")); hw_event_sas_phy_up(pm8001_ha, piomb); break; case HW_EVENT_SATA_PHY_UP: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_SATA_PHY_UP \n")); + pm8001_printk("HW_EVENT_SATA_PHY_UP\n")); hw_event_sata_phy_up(pm8001_ha, piomb); break; case HW_EVENT_PHY_STOP_STATUS: @@ -3329,12 +3330,12 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) break; case HW_EVENT_SATA_SPINUP_HOLD: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD \n")); + pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n")); sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); break; case HW_EVENT_PHY_DOWN: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_PHY_DOWN \n")); + pm8001_printk("HW_EVENT_PHY_DOWN\n")); sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); phy->phy_attached = 0; phy->phy_state = 0; @@ -3446,7 +3447,7 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) break; case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED \n")); + pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n")); pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_PHY_RESET_FAILED, port_id, phy_id, 0, 0); @@ -3456,25 +3457,25 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) break; case HW_EVENT_PORT_RESET_TIMER_TMO: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO \n")); + pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n")); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_PORT_RECOVERY_TIMER_TMO: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO \n")); + pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n")); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_PORT_RECOVER: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_PORT_RECOVER \n")); + 
pm8001_printk("HW_EVENT_PORT_RECOVER\n")); break; case HW_EVENT_PORT_RESET_COMPLETE: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE \n")); + pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n")); break; case EVENT_BROADCAST_ASYNCH_EVENT: PM8001_MSG_DBG(pm8001_ha, @@ -3502,21 +3503,21 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) switch (opc) { case OPC_OUB_ECHO: - PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO \n")); + PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n")); break; case OPC_OUB_HW_EVENT: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("OPC_OUB_HW_EVENT \n")); + pm8001_printk("OPC_OUB_HW_EVENT\n")); mpi_hw_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_COMP: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("OPC_OUB_SSP_COMP \n")); + pm8001_printk("OPC_OUB_SSP_COMP\n")); mpi_ssp_completion(pm8001_ha, piomb); break; case OPC_OUB_SMP_COMP: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("OPC_OUB_SMP_COMP \n")); + pm8001_printk("OPC_OUB_SMP_COMP\n")); mpi_smp_completion(pm8001_ha, piomb); break; case OPC_OUB_LOCAL_PHY_CNTRL: @@ -3526,26 +3527,26 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) break; case OPC_OUB_DEV_REGIST: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("OPC_OUB_DEV_REGIST \n")); + pm8001_printk("OPC_OUB_DEV_REGIST\n")); mpi_reg_resp(pm8001_ha, piomb); break; case OPC_OUB_DEREG_DEV: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("unresgister the deviece \n")); + pm8001_printk("unresgister the deviece\n")); mpi_dereg_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEV_HANDLE: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("OPC_OUB_GET_DEV_HANDLE \n")); + pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n")); break; case OPC_OUB_SATA_COMP: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("OPC_OUB_SATA_COMP \n")); + pm8001_printk("OPC_OUB_SATA_COMP\n")); mpi_sata_completion(pm8001_ha, piomb); break; case OPC_OUB_SATA_EVENT: PM8001_MSG_DBG(pm8001_ha, - pm8001_printk("OPC_OUB_SATA_EVENT \n")); + pm8001_printk("OPC_OUB_SATA_EVENT\n")); mpi_sata_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_EVENT: @@ -3858,19 +3859,19 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, circularQ = &pm8001_ha->inbnd_q_tbl[0]; if (task->data_dir == PCI_DMA_NONE) { ATAP = 0x04; /* no data*/ - PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data \n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n")); } else if (likely(!task->ata_task.device_control_reg_update)) { if (task->ata_task.dma_xfer) { ATAP = 0x06; /* DMA */ - PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA \n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n")); } else { ATAP = 0x05; /* PIO*/ - PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO \n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); } if (task->ata_task.use_ncq && dev->sata_dev.command_set != ATAPI_COMMAND_SET) { ATAP = 0x07; /* FPDMA */ - PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA \n")); + PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } } if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 172cefb6deb9..c21a2163f9f6 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -61,7 +61,7 @@ static struct scsi_host_template pm8001_sht = { .name = DRV_NAME, .queuecommand = sas_queuecommand, .target_alloc = sas_target_alloc, - .slave_configure = pm8001_slave_configure, + .slave_configure = sas_slave_configure, .slave_destroy = sas_slave_destroy, .scan_finished = 
pm8001_scan_finished, .scan_start = pm8001_scan_start, @@ -76,7 +76,7 @@ static struct scsi_host_template pm8001_sht = { .use_clustering = ENABLE_CLUSTERING, .eh_device_reset_handler = sas_eh_device_reset_handler, .eh_bus_reset_handler = sas_eh_bus_reset_handler, - .slave_alloc = pm8001_slave_alloc, + .slave_alloc = sas_slave_alloc, .target_destroy = sas_target_destroy, .ioctl = sas_ioctl, .shost_attrs = pm8001_host_attrs, diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 6ae059ebb4bb..fb3dc9978861 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -210,26 +210,12 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, PM8001_CHIP_DISP->phy_stop_req(pm8001_ha, phy_id); break; default: - rc = -EOPNOTSUPP; + rc = -ENOSYS; } msleep(300); return rc; } -int pm8001_slave_alloc(struct scsi_device *scsi_dev) -{ - struct domain_device *dev = sdev_to_domain_dev(scsi_dev); - if (dev_is_sata(dev)) { - /* We don't need to rescan targets - * if REPORT_LUNS request is failed - */ - if (scsi_dev->lun > 0) - return -ENXIO; - scsi_dev->tagged_supported = 1; - } - return sas_slave_alloc(scsi_dev); -} - /** * pm8001_scan_start - we should enable all HBA phys by sending the phy_start * command to HBA. @@ -314,22 +300,7 @@ static int pm8001_task_prep_ssp(struct pm8001_hba_info *pm8001_ha, { return PM8001_CHIP_DISP->ssp_io_req(pm8001_ha, ccb); } -int pm8001_slave_configure(struct scsi_device *sdev) -{ - struct domain_device *dev = sdev_to_domain_dev(sdev); - int ret = sas_slave_configure(sdev); - if (ret) - return ret; - if (dev_is_sata(dev)) { - #ifdef PM8001_DISABLE_NCQ - struct ata_port *ap = dev->sata_dev.ap; - struct ata_device *adev = ap->link.device; - adev->flags |= ATA_DFLAG_NCQ_OFF; - scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1); - #endif - } - return 0; -} + /* Find the local port id that's attached to this device */ static int sas_find_local_port_id(struct domain_device *dev) { @@ -385,21 +356,8 @@ static int pm8001_task_exec(struct sas_task *task, const int num, do { dev = t->dev; pm8001_dev = dev->lldd_dev; - if (DEV_IS_GONE(pm8001_dev)) { - if (pm8001_dev) { - PM8001_IO_DBG(pm8001_ha, - pm8001_printk("device %d not ready.\n", - pm8001_dev->device_id)); - } else { - PM8001_IO_DBG(pm8001_ha, - pm8001_printk("device %016llx not " - "ready.\n", SAS_ADDR(dev->sas_addr))); - } - rc = SAS_PHY_DOWN; - goto out_done; - } port = &pm8001_ha->port[sas_find_local_port_id(dev)]; - if (!port->port_attached) { + if (DEV_IS_GONE(pm8001_dev) || !port->port_attached) { if (sas_protocol_ata(t->task_proto)) { struct task_status_struct *ts = &t->task_status; ts->resp = SAS_TASK_UNDELIVERED; @@ -651,7 +609,7 @@ static int pm8001_dev_found_notify(struct domain_device *dev) flag = 1; /* directly sata*/ } } /*register this device to HBA*/ - PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device \n")); + PM8001_DISC_DBG(pm8001_ha, pm8001_printk("Found device\n")); PM8001_CHIP_DISP->reg_dev_req(pm8001_ha, pm8001_device, flag); spin_unlock_irqrestore(&pm8001_ha->lock, flags); wait_for_completion(&completion); @@ -669,30 +627,6 @@ int pm8001_dev_found(struct domain_device *dev) return pm8001_dev_found_notify(dev); } -/** - * pm8001_alloc_task - allocate a task structure for TMF - */ -static struct sas_task *pm8001_alloc_task(void) -{ - struct sas_task *task = kzalloc(sizeof(*task), GFP_KERNEL); - if (task) { - INIT_LIST_HEAD(&task->list); - spin_lock_init(&task->task_state_lock); - task->task_state_flags = SAS_TASK_STATE_PENDING; - 
init_timer(&task->timer); - init_completion(&task->completion); - } - return task; -} - -static void pm8001_free_task(struct sas_task *task) -{ - if (task) { - BUG_ON(!list_empty(&task->list)); - kfree(task); - } -} - static void pm8001_task_done(struct sas_task *task) { if (!del_timer(&task->timer)) @@ -728,7 +662,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, struct pm8001_hba_info *pm8001_ha = pm8001_find_ha_by_dev(dev); for (retry = 0; retry < 3; retry++) { - task = pm8001_alloc_task(); + task = sas_alloc_task(GFP_KERNEL); if (!task) return -ENOMEM; @@ -789,14 +723,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat)); - pm8001_free_task(task); + sas_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); - if (task != NULL) - pm8001_free_task(task); + sas_free_task(task); return res; } @@ -811,7 +744,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, struct sas_task *task = NULL; for (retry = 0; retry < 3; retry++) { - task = pm8001_alloc_task(); + task = sas_alloc_task(GFP_KERNEL); if (!task) return -ENOMEM; @@ -864,14 +797,13 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, SAS_ADDR(dev->sas_addr), task->task_status.resp, task->task_status.stat)); - pm8001_free_task(task); + sas_free_task(task); task = NULL; } } ex_err: BUG_ON(retry == 3 && task != NULL); - if (task != NULL) - pm8001_free_task(task); + sas_free_task(task); return res; } @@ -1026,13 +958,14 @@ int pm8001_query_task(struct sas_task *task) /* The task is still in Lun, release it then */ case TMF_RESP_FUNC_SUCC: PM8001_EH_DBG(pm8001_ha, - pm8001_printk("The task is still in Lun \n")); + pm8001_printk("The task is still in Lun\n")); + break; /* The task is not in Lun or failed, reset the phy */ case TMF_RESP_FUNC_FAILED: case TMF_RESP_FUNC_COMPLETE: PM8001_EH_DBG(pm8001_ha, pm8001_printk("The task is not in Lun or failed," - " reset the phy \n")); + " reset the phy\n")); break; } } diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h index b97c8ab0c20e..93959febe205 100644 --- a/drivers/scsi/pm8001/pm8001_sas.h +++ b/drivers/scsi/pm8001/pm8001_sas.h @@ -471,8 +471,6 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, struct sas_task *task, struct pm8001_ccb_info *ccb, u32 ccb_idx); int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func, void *funcdata); -int pm8001_slave_alloc(struct scsi_device *scsi_dev); -int pm8001_slave_configure(struct scsi_device *sdev); void pm8001_scan_start(struct Scsi_Host *shost); int pm8001_scan_finished(struct Scsi_Host *shost, unsigned long time); int pm8001_queue_command(struct sas_task *task, const int num, diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index a31e05f3bfd4..ac326c41e931 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -23,11 +23,23 @@ qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj, struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj, struct device, kobj))); struct qla_hw_data *ha = vha->hw; + int rval = 0; if (ha->fw_dump_reading == 0) return 0; - return memory_read_from_buffer(buf, count, &off, ha->fw_dump, + if (IS_QLA82XX(ha)) { + if (off < ha->md_template_size) { + rval = memory_read_from_buffer(buf, count, + &off, ha->md_tmplt_hdr, ha->md_template_size); + return rval; + } + off -= ha->md_template_size; + rval = 
memory_read_from_buffer(buf, count, + &off, ha->md_dump, ha->md_dump_size); + return rval; + } else + return memory_read_from_buffer(buf, count, &off, ha->fw_dump, ha->fw_dump_len); } @@ -41,12 +53,6 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; int reading; - if (IS_QLA82XX(ha)) { - ql_dbg(ql_dbg_user, vha, 0x705b, - "Firmware dump not supported for ISP82xx\n"); - return count; - } - if (off != 0) return (0); @@ -59,6 +65,10 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x705d, "Firmware dump cleared on (%ld).\n", vha->host_no); + if (IS_QLA82XX(vha->hw)) { + qla82xx_md_free(vha); + qla82xx_md_prep(vha); + } ha->fw_dump_reading = 0; ha->fw_dumped = 0; break; @@ -75,10 +85,29 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj, qla2x00_alloc_fw_dump(vha); break; case 3: - qla2x00_system_error(vha); + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + } else + qla2x00_system_error(vha); + break; + case 4: + if (IS_QLA82XX(ha)) { + if (ha->md_tmplt_hdr) + ql_dbg(ql_dbg_user, vha, 0x705b, + "MiniDump supported with this firmware.\n"); + else + ql_dbg(ql_dbg_user, vha, 0x709d, + "MiniDump not supported with this firmware.\n"); + } + break; + case 5: + if (IS_QLA82XX(ha)) + set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); break; } - return (count); + return -EINVAL; } static struct bin_attribute sysfs_fw_dump_attr = { @@ -122,7 +151,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size || !ha->isp_ops->write_nvram) - return 0; + return -EINVAL; /* Checksum NVRAM. */ if (IS_FWI2_CAPABLE(ha)) { @@ -165,7 +194,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj, qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); - return (count); + return count; } static struct bin_attribute sysfs_nvram_attr = { @@ -239,10 +268,10 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, int val, valid; if (off) - return 0; + return -EINVAL; if (unlikely(pci_channel_offline(ha->pdev))) - return 0; + return -EAGAIN; if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1) return -EINVAL; @@ -253,7 +282,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, case 0: if (ha->optrom_state != QLA_SREADING && ha->optrom_state != QLA_SWRITING) - break; + return -EINVAL; ha->optrom_state = QLA_SWAITING; @@ -266,7 +295,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, break; case 1: if (ha->optrom_state != QLA_SWAITING) - break; + return -EINVAL; ha->optrom_region_start = start; ha->optrom_region_size = start + size > ha->optrom_size ? 
@@ -280,7 +309,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "(%x).\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; - return count; + return -ENOMEM; } if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { @@ -299,7 +328,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, break; case 2: if (ha->optrom_state != QLA_SWAITING) - break; + return -EINVAL; /* * We need to be more restrictive on which FLASH regions are @@ -347,7 +376,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, "(%x)\n", ha->optrom_region_size); ha->optrom_state = QLA_SWAITING; - return count; + return -ENOMEM; } ql_dbg(ql_dbg_user, vha, 0x7067, @@ -358,7 +387,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, break; case 3: if (ha->optrom_state != QLA_SWRITING) - break; + return -ENOMEM; if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7068, @@ -374,7 +403,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj, ha->optrom_region_start, ha->optrom_region_size); break; default: - count = -EINVAL; + return -EINVAL; } return count; } @@ -398,10 +427,10 @@ qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj, struct qla_hw_data *ha = vha->hw; if (unlikely(pci_channel_offline(ha->pdev))) - return 0; + return -EAGAIN; if (!capable(CAP_SYS_ADMIN)) - return 0; + return -EINVAL; if (IS_NOCACHE_VPD_TYPE(ha)) ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2, @@ -438,17 +467,17 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj, /* Update flash version information for 4Gb & above. */ if (!IS_FWI2_CAPABLE(ha)) - goto done; + return -EINVAL; tmp_data = vmalloc(256); if (!tmp_data) { ql_log(ql_log_warn, vha, 0x706b, "Unable to allocate memory for VPD information update.\n"); - goto done; + return -ENOMEM; } ha->isp_ops->get_flash_version(vha, tmp_data); vfree(tmp_data); -done: + return count; } @@ -505,8 +534,7 @@ do_read: "Unable to read SFP data (%x/%x/%x).\n", rval, addr, offset); - count = 0; - break; + return -EIO; } memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE); buf += SFP_BLOCK_SIZE; @@ -536,7 +564,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, int type; if (off != 0) - return 0; + return -EINVAL; type = simple_strtol(buf, NULL, 10); switch (type) { @@ -546,13 +574,18 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, scsi_block_requests(vha->host); set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); + if (IS_QLA82XX(ha)) { + qla82xx_idc_lock(ha); + qla82xx_set_reset_owner(vha); + qla82xx_idc_unlock(ha); + } qla2xxx_wake_dpc(vha); qla2x00_wait_for_chip_reset(vha); scsi_unblock_requests(vha->host); break; case 0x2025d: if (!IS_QLA81XX(ha)) - break; + return -EPERM; ql_log(ql_log_info, vha, 0x706f, "Issuing MPI reset.\n"); @@ -571,7 +604,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj, if (!IS_QLA82XX(ha) || vha != base_vha) { ql_log(ql_log_info, vha, 0x7071, "FCoE ctx reset no supported.\n"); - return count; + return -EPERM; } ql_log(ql_log_info, vha, 0x7072, @@ -607,7 +640,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, ha->edc_data_len = 0; if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) - return 0; + return -EINVAL; if (!ha->edc_data) { ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, @@ -615,7 +648,7 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, if (!ha->edc_data) { ql_log(ql_log_warn, vha, 0x7073, 
"Unable to allocate memory for EDC write.\n"); - return 0; + return -ENOMEM; } } @@ -634,9 +667,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj, dev, adr, len, opt); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7074, - "Unable to write EDC (%x) %02x:%04x:%02x:%02x\n", + "Unable to write EDC (%x) %02x:%04x:%02x:%02hhx\n", rval, dev, adr, opt, len, buf[8]); - return 0; + return -EIO; } return count; @@ -665,7 +698,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, ha->edc_data_len = 0; if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8) - return 0; + return -EINVAL; if (!ha->edc_data) { ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, @@ -673,7 +706,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, if (!ha->edc_data) { ql_log(ql_log_warn, vha, 0x708c, "Unable to allocate memory for EDC status.\n"); - return 0; + return -ENOMEM; } } @@ -693,7 +726,7 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj, ql_log(ql_log_info, vha, 0x7075, "Unable to write EDC status (%x) %02x:%04x:%02x.\n", rval, dev, adr, opt, len); - return 0; + return -EIO; } ha->edc_data_len = len; @@ -805,7 +838,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, if (!ha->dcbx_tlv) { ql_log(ql_log_warn, vha, 0x7078, "Unable to allocate memory for DCBX TLV read-data.\n"); - return 0; + return -ENOMEM; } do_read: @@ -817,7 +850,7 @@ do_read: if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x7079, "Unable to read DCBX TLV (%x).\n", rval); - count = 0; + return -EIO; } memcpy(buf, ha->dcbx_tlv, count); diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c index 07d1767cd26b..8b641a8a0c74 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.c +++ b/drivers/scsi/qla2xxx/qla_bsg.c @@ -704,6 +704,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job) elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; if ((ha->current_topology == ISP_CFG_F || + (atomic_read(&vha->loop_state) == LOOP_DOWN) || (IS_QLA81XX(ha) && le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && @@ -1447,6 +1448,148 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job) } static int +qla2x00_update_fru_versions(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_image_version_list *list = (void *)bsg; + struct qla_image_version *image; + uint32_t count; + dma_addr_t sfp_dma; + void *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + if (!sfp) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, list, sizeof(bsg)); + + image = list->version; + count = list->count; + while (count--) { + memcpy(sfp, &image->field_info, sizeof(image->field_info)); + rval = qla2x00_write_sfp(vha, sfp_dma, sfp, + image->field_address.device, image->field_address.offset, + sizeof(image->field_info), image->field_address.option); + if (rval) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + image++; + } + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 
16; + bsg_job->job_done(bsg_job); + + return 0; +} + +static int +qla2x00_read_fru_status(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_status_reg *sr = (void *)bsg; + dma_addr_t sfp_dma; + uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + if (!sfp) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); + + rval = qla2x00_read_sfp(vha, sfp_dma, sfp, + sr->field_address.device, sr->field_address.offset, + sizeof(sr->status_reg), sr->field_address.option); + sr->status_reg = *sfp; + + if (rval) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, sr, sizeof(*sr)); + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->reply_payload_rcv_len = sizeof(*sr); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + + return 0; +} + +static int +qla2x00_write_fru_status(struct fc_bsg_job *bsg_job) +{ + struct Scsi_Host *host = bsg_job->shost; + scsi_qla_host_t *vha = shost_priv(host); + struct qla_hw_data *ha = vha->hw; + int rval = 0; + uint8_t bsg[DMA_POOL_SIZE]; + struct qla_status_reg *sr = (void *)bsg; + dma_addr_t sfp_dma; + uint8_t *sfp = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &sfp_dma); + if (!sfp) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_NO_MEMORY; + goto done; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, sr, sizeof(*sr)); + + *sfp = sr->status_reg; + rval = qla2x00_write_sfp(vha, sfp_dma, sfp, + sr->field_address.device, sr->field_address.offset, + sizeof(sr->status_reg), sr->field_address.option); + + if (rval) { + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = + EXT_STATUS_MAILBOX; + goto dealloc; + } + + bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; + +dealloc: + dma_pool_free(ha->s_dma_pool, sfp, sfp_dma); + +done: + bsg_job->reply_len = sizeof(struct fc_bsg_reply); + bsg_job->reply->result = DID_OK << 16; + bsg_job->job_done(bsg_job); + + return 0; +} + +static int qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) { switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) { @@ -1474,6 +1617,15 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job) case QL_VND_UPDATE_FLASH: return qla2x00_update_optrom(bsg_job); + case QL_VND_SET_FRU_VERSION: + return qla2x00_update_fru_versions(bsg_job); + + case QL_VND_READ_FRU_STATUS: + return qla2x00_read_fru_status(bsg_job); + + case QL_VND_WRITE_FRU_STATUS: + return qla2x00_write_fru_status(bsg_job); + default: bsg_job->reply->result = (DID_ERROR << 16); bsg_job->job_done(bsg_job); diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h index 0f0f54e35f06..70caa63a8930 100644 --- a/drivers/scsi/qla2xxx/qla_bsg.h +++ b/drivers/scsi/qla2xxx/qla_bsg.h @@ -16,6 +16,16 @@ #define QL_VND_FCP_PRIO_CFG_CMD 0x06 #define QL_VND_READ_FLASH 0x07 #define QL_VND_UPDATE_FLASH 0x08 +#define QL_VND_SET_FRU_VERSION 0x0B +#define QL_VND_READ_FRU_STATUS 0x0C +#define 
QL_VND_WRITE_FRU_STATUS 0x0D + +/* BSG Vendor specific subcode returns */ +#define EXT_STATUS_OK 0 +#define EXT_STATUS_ERR 1 +#define EXT_STATUS_INVALID_PARAM 6 +#define EXT_STATUS_MAILBOX 11 +#define EXT_STATUS_NO_MEMORY 17 /* BSG definations for interpreting CommandSent field */ #define INT_DEF_LB_LOOPBACK_CMD 0 @@ -141,4 +151,36 @@ struct qla_port_param { uint16_t mode; uint16_t speed; } __attribute__ ((packed)); + + +/* FRU VPD */ + +#define MAX_FRU_SIZE 36 + +struct qla_field_address { + uint16_t offset; + uint16_t device; + uint16_t option; +} __packed; + +struct qla_field_info { + uint8_t version[MAX_FRU_SIZE]; +} __packed; + +struct qla_image_version { + struct qla_field_address field_address; + struct qla_field_info field_info; +} __packed; + +struct qla_image_version_list { + uint32_t count; + struct qla_image_version version[0]; +} __packed; + +struct qla_status_reg { + struct qla_field_address field_address; + uint8_t status_reg; + uint8_t reserved[7]; +} __packed; + #endif diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c index d79cd8a5f831..9df4787715c0 100644 --- a/drivers/scsi/qla2xxx/qla_dbg.c +++ b/drivers/scsi/qla2xxx/qla_dbg.c @@ -12,7 +12,7 @@ * | Level | Last Value Used | Holes | * ---------------------------------------------------------------------- * | Module Init and Probe | 0x0116 | | - * | Mailbox commands | 0x1126 | | + * | Mailbox commands | 0x1129 | | * | Device Discovery | 0x2083 | | * | Queue Command and IO tracing | 0x302e | 0x3008 | * | DPC Thread | 0x401c | | @@ -22,7 +22,7 @@ * | Task Management | 0x8041 | | * | AER/EEH | 0x900f | | * | Virtual Port | 0xa007 | | - * | ISP82XX Specific | 0xb04f | | + * | ISP82XX Specific | 0xb051 | | * | MultiQ | 0xc00b | | * | Misc | 0xd00b | | * ---------------------------------------------------------------------- @@ -403,7 +403,7 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) return ptr + sizeof(struct qla2xxx_mq_chain); } -static void +void qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval) { struct qla_hw_data *ha = vha->hw; diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index a03eaf40f377..fcf052c50bf5 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2438,7 +2438,8 @@ struct qla_hw_data { uint32_t quiesce_owner:1; uint32_t thermal_supported:1; uint32_t isp82xx_reset_hdlr_active:1; - /* 26 bits */ + uint32_t isp82xx_reset_owner:1; + /* 28 bits */ } flags; /* This spinlock is used to protect "io transactions", you must @@ -2822,6 +2823,12 @@ struct qla_hw_data { uint8_t fw_type; __le32 file_prd_off; /* File firmware product offset */ + + uint32_t md_template_size; + void *md_tmplt_hdr; + dma_addr_t md_tmplt_hdr_dma; + void *md_dump; + uint32_t md_dump_size; }; /* diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 29b1a3e28231..ce32d8135c9e 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -104,6 +104,8 @@ extern int ql2xenablehba_err_chk; extern int ql2xtargetreset; extern int ql2xdontresethba; extern unsigned int ql2xmaxlun; +extern int ql2xmdcapmask; +extern int ql2xmdenable; extern int qla2x00_loop_reset(scsi_qla_host_t *); extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int); @@ -407,6 +409,8 @@ extern void qla2x00_beacon_blink(struct scsi_qla_host *); extern int qla24xx_beacon_on(struct scsi_qla_host *); extern int qla24xx_beacon_off(struct scsi_qla_host *); extern void qla24xx_beacon_blink(struct scsi_qla_host 
*); +extern int qla82xx_beacon_on(struct scsi_qla_host *); +extern int qla82xx_beacon_off(struct scsi_qla_host *); extern uint8_t *qla2x00_read_optrom_data(struct scsi_qla_host *, uint8_t *, uint32_t, uint32_t); @@ -442,6 +446,7 @@ extern void qla2x00_dump_buffer_zipped(uint8_t *, uint32_t); extern void ql_dump_regs(uint32_t, scsi_qla_host_t *, int32_t); extern void ql_dump_buffer(uint32_t, scsi_qla_host_t *, int32_t, uint8_t *, uint32_t); +extern void qla2xxx_dump_post_process(scsi_qla_host_t *, int); /* * Global Function Prototypes in qla_gs.c source file. @@ -569,7 +574,10 @@ extern int qla82xx_mbx_intr_enable(scsi_qla_host_t *); extern int qla82xx_mbx_intr_disable(scsi_qla_host_t *); extern void qla82xx_start_iocbs(srb_t *); extern int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *); +extern int qla82xx_check_md_needed(scsi_qla_host_t *); extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *); +extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int); +extern char *qdev_state(uint32_t); /* BSG related functions */ extern int qla24xx_bsg_request(struct fc_bsg_job *); @@ -579,4 +587,14 @@ extern int qla2x00_issue_iocb_timeout(scsi_qla_host_t *, void *, dma_addr_t, size_t, uint32_t); extern int qla2x00_get_idma_speed(scsi_qla_host_t *, uint16_t, uint16_t *, uint16_t *); + +/* Minidump related functions */ +extern int qla82xx_md_get_template_size(scsi_qla_host_t *); +extern int qla82xx_md_get_template(scsi_qla_host_t *); +extern int qla82xx_md_alloc(scsi_qla_host_t *); +extern void qla82xx_md_free(scsi_qla_host_t *); +extern int qla82xx_md_collect(scsi_qla_host_t *); +extern void qla82xx_md_prep(scsi_qla_host_t *); +extern void qla82xx_set_reset_owner(scsi_qla_host_t *); + #endif /* _QLA_GBL_H */ diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 37da04d3db26..f03e915f1877 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1480,13 +1480,19 @@ qla2x00_setup_chip(scsi_qla_host_t *vha) if (rval == QLA_SUCCESS) { enable_82xx_npiv: fw_major_version = ha->fw_major_version; - rval = qla2x00_get_fw_version(vha, - &ha->fw_major_version, - &ha->fw_minor_version, - &ha->fw_subminor_version, - &ha->fw_attributes, &ha->fw_memory_size, - ha->mpi_version, &ha->mpi_capabilities, - ha->phy_version); + if (IS_QLA82XX(ha)) + qla82xx_check_md_needed(vha); + else { + rval = qla2x00_get_fw_version(vha, + &ha->fw_major_version, + &ha->fw_minor_version, + &ha->fw_subminor_version, + &ha->fw_attributes, + &ha->fw_memory_size, + ha->mpi_version, + &ha->mpi_capabilities, + ha->phy_version); + } if (rval != QLA_SUCCESS) goto failed; ha->flags.npiv_supported = 0; @@ -1503,10 +1509,8 @@ enable_82xx_npiv: &ha->fw_xcb_count, NULL, NULL, &ha->max_npiv_vports, NULL); - if (!fw_major_version && ql2xallocfwdump) { - if (!IS_QLA82XX(ha)) - qla2x00_alloc_fw_dump(vha); - } + if (!fw_major_version && ql2xallocfwdump) + qla2x00_alloc_fw_dump(vha); } } else { ql_log(ql_log_fatal, vha, 0x00cd, @@ -1924,7 +1928,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) rval = qla84xx_init_chip(vha); if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, - vha, 0x8043, + vha, 0x8026, "Init chip failed.\n"); break; } @@ -1933,7 +1937,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha) cs84xx_time = jiffies - cs84xx_time; wtime += cs84xx_time; mtime += cs84xx_time; - ql_dbg(ql_dbg_taskm, vha, 0x8042, + ql_dbg(ql_dbg_taskm, vha, 0x8025, "Increasing wait time by %ld. 
" "New time %ld.\n", cs84xx_time, wtime); @@ -5443,11 +5447,7 @@ qla82xx_restart_isp(scsi_qla_host_t *vha) clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags); /* Update the firmware version */ - qla2x00_get_fw_version(vha, &ha->fw_major_version, - &ha->fw_minor_version, &ha->fw_subminor_version, - &ha->fw_attributes, &ha->fw_memory_size, - ha->mpi_version, &ha->mpi_capabilities, - ha->phy_version); + status = qla82xx_check_md_needed(vha); if (ha->fce) { ha->flags.fce_enabled = 1; diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 8a7591f035e6..3474e86e98ab 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2060,6 +2060,11 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, case ELS_IOCB_TYPE: qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); break; + case MARKER_TYPE: + /* Do nothing in this case, this check is to prevent it + * from falling into default case + */ + break; default: /* Type Not Supported. */ ql_dbg(ql_dbg_async, vha, 0x5042, diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index f7604ea1af83..3b3cec9f6ac2 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -4186,3 +4186,130 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha) return rval; } + +int +qla82xx_md_get_template_size(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__); + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT_SIZE); + mcp->mb[3] = MSW(RQST_TMPLT_SIZE); + + mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + rval = qla2x00_mailbox_command(vha, mcp); + + /* Always copy back return mailbox values. 
*/ + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1120, + "mailbox command FAILED=0x%x, subcode=%x.\n", + (mcp->mb[1] << 16) | mcp->mb[0], + (mcp->mb[3] << 16) | mcp->mb[2]); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__); + ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); + if (!ha->md_template_size) { + ql_dbg(ql_dbg_mbx, vha, 0x1122, + "Null template size obtained.\n"); + rval = QLA_FUNCTION_FAILED; + } + } + return rval; +} + +int +qla82xx_md_get_template(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + int rval = QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__); + + ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, + ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); + if (!ha->md_tmplt_hdr) { + ql_log(ql_log_warn, vha, 0x1124, + "Unable to allocate memory for Minidump template.\n"); + return rval; + } + + memset(mcp->mb, 0 , sizeof(mcp->mb)); + mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[1] = MSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); + mcp->mb[2] = LSW(RQST_TMPLT); + mcp->mb[3] = MSW(RQST_TMPLT); + mcp->mb[4] = LSW(LSD(ha->md_tmplt_hdr_dma)); + mcp->mb[5] = MSW(LSD(ha->md_tmplt_hdr_dma)); + mcp->mb[6] = LSW(MSD(ha->md_tmplt_hdr_dma)); + mcp->mb[7] = MSW(MSD(ha->md_tmplt_hdr_dma)); + mcp->mb[8] = LSW(ha->md_template_size); + mcp->mb[9] = MSW(ha->md_template_size); + + mcp->flags = MBX_DMA_OUT|MBX_DMA_IN|IOCTL_CMD; + mcp->tov = MBX_TOV_SECONDS; + mcp->out_mb = MBX_11|MBX_10|MBX_9|MBX_8| + MBX_7|MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0; + mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0; + rval = qla2x00_mailbox_command(vha, mcp); + + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1125, + "mailbox command FAILED=0x%x, subcode=%x.\n", + ((mcp->mb[1] << 16) | mcp->mb[0]), + ((mcp->mb[3] << 16) | mcp->mb[2])); + } else + ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__); + return rval; +} + +int +qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable) +{ + int rval; + struct qla_hw_data *ha = vha->hw; + mbx_cmd_t mc; + mbx_cmd_t *mcp = &mc; + + if (!IS_QLA82XX(ha)) + return QLA_FUNCTION_FAILED; + + ql_dbg(ql_dbg_mbx, vha, 0x1127, + "Entered %s.\n", __func__); + + memset(mcp, 0, sizeof(mbx_cmd_t)); + mcp->mb[0] = MBC_SET_LED_CONFIG; + if (enable) + mcp->mb[7] = 0xE; + else + mcp->mb[7] = 0xD; + + mcp->out_mb = MBX_7|MBX_0; + mcp->in_mb = MBX_0; + mcp->tov = 30; + mcp->flags = 0; + + rval = qla2x00_mailbox_command(vha, mcp); + if (rval != QLA_SUCCESS) { + ql_dbg(ql_dbg_mbx, vha, 0x1128, + "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); + } else { + ql_dbg(ql_dbg_mbx, vha, 0x1129, + "Done %s.\n", __func__); + } + + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c index 049807cda419..94bded5ddce4 100644 --- a/drivers/scsi/qla2xxx/qla_nx.c +++ b/drivers/scsi/qla2xxx/qla_nx.c @@ -7,6 +7,8 @@ #include "qla_def.h" #include <linux/delay.h> #include <linux/pci.h> +#include <linux/ratelimit.h> +#include <linux/vmalloc.h> #include <scsi/scsi_tcq.h> #define MASK(n) ((1ULL<<(n))-1) @@ -328,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = { }; /* Device states */ -char *qdev_state[] = { +char *q_dev_state[] = { "Unknown", "Cold", "Initializing", @@ -339,6 +341,11 @@ char *qdev_state[] = { "Quiescent", }; +char *qdev_state(uint32_t dev_state) +{ + return q_dev_state[dev_state]; +} + /* * In: 'off' is offset from CRB space in 128M pci map * Out: 'off' is 2M pci map addr @@ -2355,9 +2362,13 @@ 
qla82xx_need_reset(struct qla_hw_data *ha) uint32_t drv_state; int rval; - drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); - rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); - return rval; + if (ha->flags.isp82xx_reset_owner) + return 1; + else { + drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); + rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); + return rval; + } } static inline void @@ -2374,8 +2385,8 @@ qla82xx_set_rst_ready(struct qla_hw_data *ha) drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); } drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4)); - ql_log(ql_log_info, vha, 0x00bb, - "drv_state = 0x%x.\n", drv_state); + ql_dbg(ql_dbg_init, vha, 0x00bb, + "drv_state = 0x%08x.\n", drv_state); qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); } @@ -2598,7 +2609,7 @@ qla2xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt, dsd_seg = (uint32_t *)&cmd_pkt->fcp_data_dseg_address; *dsd_seg++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *dsd_seg++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); - *dsd_seg++ = cpu_to_le32(dsd_list_len); + cmd_pkt->fcp_data_dseg_len = cpu_to_le32(dsd_list_len); } else { *cur_dsd++ = cpu_to_le32(LSD(dsd_ptr->dsd_list_dma)); *cur_dsd++ = cpu_to_le32(MSD(dsd_ptr->dsd_list_dma)); @@ -3529,6 +3540,7 @@ static void qla82xx_need_reset_handler(scsi_qla_host_t *vha) { uint32_t dev_state, drv_state, drv_active; + uint32_t active_mask = 0; unsigned long reset_timeout; struct qla_hw_data *ha = vha->hw; struct req_que *req = ha->req_q_map[0]; @@ -3541,15 +3553,32 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) qla82xx_idc_lock(ha); } - qla82xx_set_rst_ready(ha); + drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + if (!ha->flags.isp82xx_reset_owner) { + ql_dbg(ql_dbg_p3p, vha, 0xb028, + "reset_acknowledged by 0x%x\n", ha->portnum); + qla82xx_set_rst_ready(ha); + } else { + active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4)); + drv_active &= active_mask; + ql_dbg(ql_dbg_p3p, vha, 0xb029, + "active_mask: 0x%08x\n", active_mask); + } /* wait for 10 seconds for reset ack from all functions */ reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - while (drv_state != drv_active) { + ql_dbg(ql_dbg_p3p, vha, 0xb02a, + "drv_state: 0x%08x, drv_active: 0x%08x, " + "dev_state: 0x%08x, active_mask: 0x%08x\n", + drv_state, drv_active, dev_state, active_mask); + + while (drv_state != drv_active && + dev_state != QLA82XX_DEV_INITIALIZING) { if (time_after_eq(jiffies, reset_timeout)) { ql_log(ql_log_warn, vha, 0x00b5, "Reset timeout.\n"); @@ -3560,23 +3589,87 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha) qla82xx_idc_lock(ha); drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE); drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); + if (ha->flags.isp82xx_reset_owner) + drv_active &= active_mask; + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); } - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + ql_dbg(ql_dbg_p3p, vha, 0xb02b, + "drv_state: 0x%08x, drv_active: 0x%08x, " + "dev_state: 0x%08x, active_mask: 0x%08x\n", + drv_state, drv_active, dev_state, active_mask); + ql_log(ql_log_info, vha, 0x00b6, "Device state is 0x%x = %s.\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + dev_state < MAX_STATES ? 
qdev_state(dev_state) : "Unknown"); /* Force to DEV_COLD unless someone else is starting a reset */ - if (dev_state != QLA82XX_DEV_INITIALIZING) { + if (dev_state != QLA82XX_DEV_INITIALIZING && + dev_state != QLA82XX_DEV_COLD) { ql_log(ql_log_info, vha, 0x00b7, "HW State: COLD/RE-INIT.\n"); qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); + if (ql2xmdenable) { + if (qla82xx_md_collect(vha)) + ql_log(ql_log_warn, vha, 0xb02c, + "Not able to collect minidump.\n"); + } else + ql_log(ql_log_warn, vha, 0xb04f, + "Minidump disabled.\n"); } } int +qla82xx_check_md_needed(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint16_t fw_major_version, fw_minor_version, fw_subminor_version; + int rval = QLA_SUCCESS; + + fw_major_version = ha->fw_major_version; + fw_minor_version = ha->fw_minor_version; + fw_subminor_version = ha->fw_subminor_version; + + rval = qla2x00_get_fw_version(vha, &ha->fw_major_version, + &ha->fw_minor_version, &ha->fw_subminor_version, + &ha->fw_attributes, &ha->fw_memory_size, + ha->mpi_version, &ha->mpi_capabilities, + ha->phy_version); + + if (rval != QLA_SUCCESS) + return rval; + + if (ql2xmdenable) { + if (!ha->fw_dumped) { + if (fw_major_version != ha->fw_major_version || + fw_minor_version != ha->fw_minor_version || + fw_subminor_version != ha->fw_subminor_version) { + + ql_log(ql_log_info, vha, 0xb02d, + "Firmware version differs. " + "Previous version: %d:%d:%d - " + "New version: %d:%d:%d\n", + fw_major_version, + fw_minor_version, + fw_subminor_version, + ha->fw_major_version, + ha->fw_minor_version, + ha->fw_subminor_version); + /* Release MiniDump resources */ + qla82xx_md_free(vha); + /* Allocate MiniDump resources */ + qla82xx_md_prep(vha); + } else + ql_log(ql_log_info, vha, 0xb02e, + "Firmware dump available to retrieve\n"); + } + } + return rval; +} + + +int qla82xx_check_fw_alive(scsi_qla_host_t *vha) { uint32_t fw_heartbeat_counter; @@ -3637,7 +3730,7 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) ql_log(ql_log_info, vha, 0x009b, "Device state is 0x%x = %s.\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); + dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); /* wait for 30 seconds for device to go ready */ dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); @@ -3659,26 +3752,33 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha) ql_log(ql_log_info, vha, 0x009d, "Device state is 0x%x = %s.\n", dev_state, - dev_state < MAX_STATES ? qdev_state[dev_state] : + dev_state < MAX_STATES ? 
qdev_state(dev_state) : "Unknown"); } switch (dev_state) { case QLA82XX_DEV_READY: + qla82xx_check_md_needed(vha); + ha->flags.isp82xx_reset_owner = 0; goto exit; case QLA82XX_DEV_COLD: rval = qla82xx_device_bootstrap(vha); - goto exit; + break; case QLA82XX_DEV_INITIALIZING: qla82xx_idc_unlock(ha); msleep(1000); qla82xx_idc_lock(ha); break; case QLA82XX_DEV_NEED_RESET: - if (!ql2xdontresethba) - qla82xx_need_reset_handler(vha); + if (!ql2xdontresethba) + qla82xx_need_reset_handler(vha); + else { + qla82xx_idc_unlock(ha); + msleep(1000); + qla82xx_idc_lock(ha); + } dev_init_timeout = jiffies + - (ha->nx_dev_init_timeout * HZ); + (ha->nx_dev_init_timeout * HZ); break; case QLA82XX_DEV_NEED_QUIESCENT: qla82xx_need_qsnt_handler(vha); @@ -3791,6 +3891,28 @@ int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr) return rval; } +void +qla82xx_set_reset_owner(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t dev_state; + + dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); + if (dev_state == QLA82XX_DEV_READY) { + ql_log(ql_log_info, vha, 0xb02f, + "HW State: NEED RESET\n"); + qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, + QLA82XX_DEV_NEED_RESET); + ha->flags.isp82xx_reset_owner = 1; + ql_dbg(ql_dbg_p3p, vha, 0xb030, + "reset_owner is 0x%x\n", ha->portnum); + } else + ql_log(ql_log_info, vha, 0xb031, + "Device state is 0x%x = %s.\n", + dev_state, + dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown"); +} + /* * qla82xx_abort_isp * Resets ISP and aborts all outstanding commands. @@ -3806,7 +3928,6 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) { int rval; struct qla_hw_data *ha = vha->hw; - uint32_t dev_state; if (vha->device_flags & DFLG_DEV_FAILED) { ql_log(ql_log_warn, vha, 0x8024, @@ -3816,16 +3937,7 @@ qla82xx_abort_isp(scsi_qla_host_t *vha) ha->flags.isp82xx_reset_hdlr_active = 1; qla82xx_idc_lock(ha); - dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); - if (dev_state == QLA82XX_DEV_READY) { - ql_log(ql_log_info, vha, 0x8025, - "HW State: NEED RESET.\n"); - qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, - QLA82XX_DEV_NEED_RESET); - } else - ql_log(ql_log_info, vha, 0x8026, - "Hw State: %s.\n", dev_state < MAX_STATES ? 
- qdev_state[dev_state] : "Unknown"); + qla82xx_set_reset_owner(vha); qla82xx_idc_unlock(ha); rval = qla82xx_device_state_handler(vha); @@ -4016,3 +4128,803 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) } } } + +/* Minidump related functions */ +int +qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag) +{ + uint32_t off_value, rval = 0; + + WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase), + (off & 0xFFFF0000)); + + /* Read back value to make sure write has gone through */ + RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase)); + off_value = (off & 0x0000FFFF); + + if (flag) + WRT_REG_DWORD((void *) + (off_value + CRB_INDIRECT_2M + ha->nx_pcibase), + data); + else + rval = RD_REG_DWORD((void *) + (off_value + CRB_INDIRECT_2M + ha->nx_pcibase)); + + return rval; +} + +static int +qla82xx_minidump_process_control(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + struct qla82xx_md_entry_crb *crb_entry; + uint32_t read_value, opcode, poll_time; + uint32_t addr, index, crb_addr; + unsigned long wtime; + struct qla82xx_md_template_hdr *tmplt_hdr; + uint32_t rval = QLA_SUCCESS; + int i; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; + crb_addr = crb_entry->addr; + + for (i = 0; i < crb_entry->op_count; i++) { + opcode = crb_entry->crb_ctrl.opcode; + if (opcode & QLA82XX_DBG_OPCODE_WR) { + qla82xx_md_rw_32(ha, crb_addr, + crb_entry->value_1, 1); + opcode &= ~QLA82XX_DBG_OPCODE_WR; + } + + if (opcode & QLA82XX_DBG_OPCODE_RW) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_RW; + } + + if (opcode & QLA82XX_DBG_OPCODE_AND) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + read_value &= crb_entry->value_2; + opcode &= ~QLA82XX_DBG_OPCODE_AND; + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value |= crb_entry->value_3; + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + } + + if (opcode & QLA82XX_DBG_OPCODE_OR) { + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + read_value |= crb_entry->value_3; + qla82xx_md_rw_32(ha, crb_addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_OR; + } + + if (opcode & QLA82XX_DBG_OPCODE_POLL) { + poll_time = crb_entry->crb_strd.poll_timeout; + wtime = jiffies + poll_time; + read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); + + do { + if ((read_value & crb_entry->value_2) + == crb_entry->value_1) + break; + else if (time_after_eq(jiffies, wtime)) { + /* capturing dump failed */ + rval = QLA_FUNCTION_FAILED; + break; + } else + read_value = qla82xx_md_rw_32(ha, + crb_addr, 0, 0); + } while (1); + opcode &= ~QLA82XX_DBG_OPCODE_POLL; + } + + if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else + addr = crb_addr; + + read_value = qla82xx_md_rw_32(ha, addr, 0, 0); + index = crb_entry->crb_ctrl.state_index_v; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { + if (crb_entry->crb_strd.state_index_a) { + index = crb_entry->crb_strd.state_index_a; + addr = tmplt_hdr->saved_state_array[index]; + } else + addr = crb_addr; + + if (crb_entry->crb_ctrl.state_index_v) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = + 
tmplt_hdr->saved_state_array[index]; + } else + read_value = crb_entry->value_1; + + qla82xx_md_rw_32(ha, addr, read_value, 1); + opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; + } + + if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { + index = crb_entry->crb_ctrl.state_index_v; + read_value = tmplt_hdr->saved_state_array[index]; + read_value <<= crb_entry->crb_ctrl.shl; + read_value >>= crb_entry->crb_ctrl.shr; + if (crb_entry->value_2) + read_value &= crb_entry->value_2; + read_value |= crb_entry->value_3; + read_value += crb_entry->value_1; + tmplt_hdr->saved_state_array[index] = read_value; + opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; + } + crb_addr += crb_entry->crb_strd.addr_stride; + } + return rval; +} + +static void +qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_md_entry_rdocm *ocm_hdr; + uint32_t *data_ptr = *d_ptr; + + ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; + r_addr = ocm_hdr->read_addr; + r_stride = ocm_hdr->read_addr_stride; + loop_cnt = ocm_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase)); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; + struct qla82xx_md_entry_mux *mux_hdr; + uint32_t *data_ptr = *d_ptr; + + mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; + r_addr = mux_hdr->read_addr; + s_addr = mux_hdr->select_addr; + s_stride = mux_hdr->select_value_stride; + s_value = mux_hdr->select_value; + loop_cnt = mux_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, s_addr, s_value, 1); + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(s_value); + *data_ptr++ = cpu_to_le32(r_value); + s_value += s_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_stride, loop_cnt, i, r_value; + struct qla82xx_md_entry_crb *crb_hdr; + uint32_t *data_ptr = *d_ptr; + + crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; + r_addr = crb_hdr->addr; + r_stride = crb_hdr->crb_strd.addr_stride; + loop_cnt = crb_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_addr); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + *d_ptr = data_ptr; +} + +static int +qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + unsigned long p_wait, w_time, p_mask; + uint32_t c_value_w, c_value_r; + struct qla82xx_md_entry_cache *cache_hdr; + int rval = QLA_FUNCTION_FAILED; + uint32_t *data_ptr = *d_ptr; + + cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = 
cache_hdr->read_ctrl.read_addr_cnt; + p_wait = cache_hdr->cache_ctrl.poll_wait; + p_mask = cache_hdr->cache_ctrl.poll_mask; + + for (i = 0; i < loop_count; i++) { + qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); + if (c_value_w) + qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); + + if (p_mask) { + w_time = jiffies + p_wait; + do { + c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0); + if ((c_value_r & p_mask) == 0) + break; + else if (time_after_eq(jiffies, w_time)) { + /* capturing dump failed */ + ql_dbg(ql_dbg_p3p, vha, 0xb032, + "c_value_r: 0x%x, poll_mask: 0x%lx, " + "w_time: 0x%lx\n", + c_value_r, p_mask, w_time); + return rval; + } + } while (1); + } + + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static void +qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t addr, r_addr, c_addr, t_r_addr; + uint32_t i, k, loop_count, t_value, r_cnt, r_value; + uint32_t c_value_w; + struct qla82xx_md_entry_cache *cache_hdr; + uint32_t *data_ptr = *d_ptr; + + cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; + loop_count = cache_hdr->op_count; + r_addr = cache_hdr->read_addr; + c_addr = cache_hdr->control_addr; + c_value_w = cache_hdr->cache_ctrl.write_value; + + t_r_addr = cache_hdr->tag_reg_addr; + t_value = cache_hdr->addr_ctrl.init_tag_value; + r_cnt = cache_hdr->read_ctrl.read_addr_cnt; + + for (i = 0; i < loop_count; i++) { + qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); + qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); + addr = r_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + addr += cache_hdr->read_ctrl.read_addr_stride; + } + t_value += cache_hdr->addr_ctrl.tag_value_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_queue(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t s_addr, r_addr; + uint32_t r_stride, r_value, r_cnt, qid = 0; + uint32_t i, k, loop_cnt; + struct qla82xx_md_entry_queue *q_hdr; + uint32_t *data_ptr = *d_ptr; + + q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; + s_addr = q_hdr->select_addr; + r_cnt = q_hdr->rd_strd.read_addr_cnt; + r_stride = q_hdr->rd_strd.read_addr_stride; + loop_cnt = q_hdr->op_count; + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, s_addr, qid, 1); + r_addr = q_hdr->read_addr; + for (k = 0; k < r_cnt; k++) { + r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); + *data_ptr++ = cpu_to_le32(r_value); + r_addr += r_stride; + } + qid += q_hdr->q_strd.queue_id_stride; + } + *d_ptr = data_ptr; +} + +static void +qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_value; + uint32_t i, loop_cnt; + struct qla82xx_md_entry_rdrom *rom_hdr; + uint32_t *data_ptr = *d_ptr; + + rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; + r_addr = rom_hdr->read_addr; + loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); + + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, + (r_addr & 0xFFFF0000), 1); + r_value = qla82xx_md_rw_32(ha, + MD_DIRECT_ROM_READ_BASE + + (r_addr & 0x0000FFFF), 0, 0); + 
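/* Store the word little-endian, then step to the next ROM offset. */ +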
*data_ptr++ = cpu_to_le32(r_value); + r_addr += sizeof(uint32_t); + } + *d_ptr = data_ptr; +} + +static int +qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) +{ + struct qla_hw_data *ha = vha->hw; + uint32_t r_addr, r_value, r_data; + uint32_t i, j, loop_cnt; + struct qla82xx_md_entry_rdmem *m_hdr; + unsigned long flags; + int rval = QLA_FUNCTION_FAILED; + uint32_t *data_ptr = *d_ptr; + + m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; + r_addr = m_hdr->read_addr; + loop_cnt = m_hdr->read_data_size/16; + + if (r_addr & 0xf) { + ql_log(ql_log_warn, vha, 0xb033, + "Read addr 0x%x not 16 bytes aligned\n", r_addr); + return rval; + } + + if (m_hdr->read_data_size % 16) { + ql_log(ql_log_warn, vha, 0xb034, + "Read data[0x%x] not multiple of 16 bytes\n", + m_hdr->read_data_size); + return rval; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb035, + "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", + __func__, r_addr, m_hdr->read_data_size, loop_cnt); + + write_lock_irqsave(&ha->hw_lock, flags); + for (i = 0; i < loop_cnt; i++) { + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); + r_value = 0; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); + r_value = MIU_TA_CTL_ENABLE; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); + r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; + qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); + + for (j = 0; j < MAX_CTL_CHECK; j++) { + r_value = qla82xx_md_rw_32(ha, + MD_MIU_TEST_AGT_CTRL, 0, 0); + if ((r_value & MIU_TA_CTL_BUSY) == 0) + break; + } + + if (j >= MAX_CTL_CHECK) { + printk_ratelimited(KERN_ERR + "failed to read through agent\n"); + write_unlock_irqrestore(&ha->hw_lock, flags); + return rval; + } + + for (j = 0; j < 4; j++) { + r_data = qla82xx_md_rw_32(ha, + MD_MIU_TEST_AGT_RDDATA[j], 0, 0); + *data_ptr++ = cpu_to_le32(r_data); + } + r_addr += 16; + } + write_unlock_irqrestore(&ha->hw_lock, flags); + *d_ptr = data_ptr; + return QLA_SUCCESS; +} + +static int +qla82xx_validate_template_chksum(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + uint64_t chksum = 0; + uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr; + int count = ha->md_template_size/sizeof(uint32_t); + + while (count-- > 0) + chksum += *d_ptr++; + while (chksum >> 32) + chksum = (chksum & 0xFFFFFFFF) + (chksum >> 32); + return ~chksum; +} + +static void +qla82xx_mark_entry_skipped(scsi_qla_host_t *vha, + qla82xx_md_entry_hdr_t *entry_hdr, int index) +{ + entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; + ql_dbg(ql_dbg_p3p, vha, 0xb036, + "Skipping entry[%d]: " + "ETYPE[0x%x]-ELEVEL[0x%x]\n", + index, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); +} + +int +qla82xx_md_collect(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int no_entry_hdr = 0; + qla82xx_md_entry_hdr_t *entry_hdr; + struct qla82xx_md_template_hdr *tmplt_hdr; + uint32_t *data_ptr; + uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; + int i = 0, rval = QLA_FUNCTION_FAILED; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + data_ptr = (uint32_t *)ha->md_dump; + + if (ha->fw_dumped) { + ql_log(ql_log_info, vha, 0xb037, + "Firmware dump available to retrieve\n"); + goto md_failed; + } + + ha->fw_dumped = 0; + + if (!ha->md_tmplt_hdr || !ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb038, + "Memory not allocated for minidump capture\n"); + goto md_failed; + } + + if (qla82xx_validate_template_chksum(vha)) { + ql_log(ql_log_info, vha, 0xb039, + 
"Template checksum validation error\n"); + goto md_failed; + } + + no_entry_hdr = tmplt_hdr->num_of_entries; + ql_dbg(ql_dbg_p3p, vha, 0xb03a, + "No of entry headers in Template: 0x%x\n", no_entry_hdr); + + ql_dbg(ql_dbg_p3p, vha, 0xb03b, + "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); + + f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; + + /* Validate whether required debug level is set */ + if ((f_capture_mask & 0x3) != 0x3) { + ql_log(ql_log_warn, vha, 0xb03c, + "Minimum required capture mask[0x%x] level not set\n", + f_capture_mask); + goto md_failed; + } + tmplt_hdr->driver_capture_mask = ql2xmdcapmask; + + tmplt_hdr->driver_info[0] = vha->host_no; + tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) | + (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) | + QLA_DRIVER_BETA_VER; + + total_data_size = ha->md_dump_size; + + ql_dbg(ql_log_info, vha, 0xb03d, + "Total minidump data_size 0x%x to be captured\n", total_data_size); + + /* Check whether template obtained is valid */ + if (tmplt_hdr->entry_type != QLA82XX_TLHDR) { + ql_log(ql_log_warn, vha, 0xb04e, + "Bad template header entry type: 0x%x obtained\n", + tmplt_hdr->entry_type); + goto md_failed; + } + + entry_hdr = (qla82xx_md_entry_hdr_t *) \ + (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); + + /* Walk through the entry headers */ + for (i = 0; i < no_entry_hdr; i++) { + + if (data_collected > total_data_size) { + ql_log(ql_log_warn, vha, 0xb03e, + "More MiniDump data collected: [0x%x]\n", + data_collected); + goto md_failed; + } + + if (!(entry_hdr->d_ctrl.entry_capture_mask & + ql2xmdcapmask)) { + entry_hdr->d_ctrl.driver_flags |= + QLA82XX_DBG_SKIPPED_FLAG; + ql_dbg(ql_dbg_p3p, vha, 0xb03f, + "Skipping entry[%d]: " + "ETYPE[0x%x]-ELEVEL[0x%x]\n", + i, entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); + goto skip_nxt_entry; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb040, + "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" + "entry_type: 0x%x, captrue_mask: 0x%x\n", + __func__, i, data_ptr, entry_hdr, + entry_hdr->entry_type, + entry_hdr->d_ctrl.entry_capture_mask); + + ql_dbg(ql_dbg_p3p, vha, 0xb041, + "Data collected: [0x%x], Dump size left:[0x%x]\n", + data_collected, (ha->md_dump_size - data_collected)); + + /* Decode the entry type and take + * required action to capture debug data */ + switch (entry_hdr->entry_type) { + case QLA82XX_RDEND: + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + break; + case QLA82XX_CNTRL: + rval = qla82xx_minidump_process_control(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_RDCRB: + qla82xx_minidump_process_rdcrb(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDMEM: + rval = qla82xx_minidump_process_rdmem(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_BOARD: + case QLA82XX_RDROM: + qla82xx_minidump_process_rdrom(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_L2DTG: + case QLA82XX_L2ITG: + case QLA82XX_L2DAT: + case QLA82XX_L2INS: + rval = qla82xx_minidump_process_l2tag(vha, + entry_hdr, &data_ptr); + if (rval != QLA_SUCCESS) { + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + goto md_failed; + } + break; + case QLA82XX_L1DAT: + case QLA82XX_L1INS: + qla82xx_minidump_process_l1cache(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDOCM: + qla82xx_minidump_process_rdocm(vha, + entry_hdr, &data_ptr); + 
break; + case QLA82XX_RDMUX: + qla82xx_minidump_process_rdmux(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_QUEUE: + qla82xx_minidump_process_queue(vha, + entry_hdr, &data_ptr); + break; + case QLA82XX_RDNOP: + default: + qla82xx_mark_entry_skipped(vha, entry_hdr, i); + break; + } + + ql_dbg(ql_dbg_p3p, vha, 0xb042, + "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr); + + data_collected = (uint8_t *)data_ptr - + (uint8_t *)ha->md_dump; +skip_nxt_entry: + entry_hdr = (qla82xx_md_entry_hdr_t *) \ + (((uint8_t *)entry_hdr) + entry_hdr->entry_size); + } + + if (data_collected != total_data_size) { + ql_log(ql_log_warn, vha, 0xb043, + "MiniDump data mismatch: Data collected: [0x%x], " + "total_data_size: [0x%x]\n", + data_collected, total_data_size); + goto md_failed; + } + + ql_log(ql_log_info, vha, 0xb044, + "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", + vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); + ha->fw_dumped = 1; + qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); + +md_failed: + return rval; +} + +int +qla82xx_md_alloc(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int i, k; + struct qla82xx_md_template_hdr *tmplt_hdr; + + tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; + + if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) { + ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF; + ql_log(ql_log_info, vha, 0xb045, + "Forcing driver capture mask to firmware default capture mask: 0x%x.\n", + ql2xmdcapmask); + } + + for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) { + if (i & ql2xmdcapmask) + ha->md_dump_size += tmplt_hdr->capture_size_array[k]; + } + + if (ha->md_dump) { + ql_log(ql_log_warn, vha, 0xb046, + "Firmware dump previously allocated.\n"); + return 1; + } + + ha->md_dump = vmalloc(ha->md_dump_size); + if (ha->md_dump == NULL) { + ql_log(ql_log_warn, vha, 0xb047, + "Unable to allocate memory for Minidump size " + "(0x%x).\n", ha->md_dump_size); + return 1; + } + return 0; +} + +void +qla82xx_md_free(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + + /* Release the template header allocated */ + if (ha->md_tmplt_hdr) { + ql_log(ql_log_info, vha, 0xb048, + "Free MiniDump template: %p, size (%d KB)\n", + ha->md_tmplt_hdr, ha->md_template_size / 1024); + dma_free_coherent(&ha->pdev->dev, ha->md_template_size, + ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); + ha->md_tmplt_hdr = 0; + } + + /* Release the template data buffer allocated */ + if (ha->md_dump) { + ql_log(ql_log_info, vha, 0xb049, + "Free MiniDump memory: %p, size (%d KB)\n", + ha->md_dump, ha->md_dump_size / 1024); + vfree(ha->md_dump); + ha->md_dump_size = 0; + ha->md_dump = 0; + } +} + +void +qla82xx_md_prep(scsi_qla_host_t *vha) +{ + struct qla_hw_data *ha = vha->hw; + int rval; + + /* Get Minidump template size */ + rval = qla82xx_md_get_template_size(vha); + if (rval == QLA_SUCCESS) { + ql_log(ql_log_info, vha, 0xb04a, + "MiniDump Template size obtained (%d KB)\n", + ha->md_template_size / 1024); + + /* Get Minidump template */ + rval = qla82xx_md_get_template(vha); + if (rval == QLA_SUCCESS) { + ql_dbg(ql_dbg_p3p, vha, 0xb04b, + "MiniDump Template obtained\n"); + + /* Allocate memory for minidump */ + rval = qla82xx_md_alloc(vha); + if (rval == QLA_SUCCESS) + ql_log(ql_log_info, vha, 0xb04c, + "MiniDump memory allocated (%d KB)\n", + ha->md_dump_size / 1024); + else { + ql_log(ql_log_info, vha, 0xb04d, + "Free MiniDump template: %p, size: (%d KB)\n", + ha->md_tmplt_hdr, + ha->md_template_size / 1024); + 
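/* Dump buffer allocation failed: free the template so a later retry starts clean. */ +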
dma_free_coherent(&ha->pdev->dev, + ha->md_template_size, + ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); + ha->md_tmplt_hdr = 0; + } + + } + } +} + +int +qla82xx_beacon_on(struct scsi_qla_host *vha) +{ + + int rval; + struct qla_hw_data *ha = vha->hw; + qla82xx_idc_lock(ha); + rval = qla82xx_mbx_beacon_ctl(vha, 1); + + if (rval) { + ql_log(ql_log_warn, vha, 0xb050, + "mbx set led config failed in %s\n", __func__); + goto exit; + } + ha->beacon_blink_led = 1; +exit: + qla82xx_idc_unlock(ha); + return rval; +} + +int +qla82xx_beacon_off(struct scsi_qla_host *vha) +{ + + int rval; + struct qla_hw_data *ha = vha->hw; + qla82xx_idc_lock(ha); + rval = qla82xx_mbx_beacon_ctl(vha, 0); + + if (rval) { + ql_log(ql_log_warn, vha, 0xb051, + "mbx set led config failed in %s\n", __func__); + goto exit; + } + ha->beacon_blink_led = 0; +exit: + qla82xx_idc_unlock(ha); + return rval; +} diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h index 8a21832c6693..57820c199bc2 100644 --- a/drivers/scsi/qla2xxx/qla_nx.h +++ b/drivers/scsi/qla2xxx/qla_nx.h @@ -484,8 +484,6 @@ #define QLA82XX_ADDR_OCM1 (0x0000000200400000ULL) #define QLA82XX_ADDR_OCM1_MAX (0x00000002004fffffULL) #define QLA82XX_ADDR_QDR_NET (0x0000000300000000ULL) - -#define QLA82XX_P2_ADDR_QDR_NET_MAX (0x00000003001fffffULL) #define QLA82XX_P3_ADDR_QDR_NET_MAX (0x0000000303ffffffULL) #define QLA82XX_PCI_CRBSPACE (unsigned long)0x06000000 @@ -890,6 +888,7 @@ struct ct6_dsd { }; #define MBC_TOGGLE_INTERRUPT 0x10 +#define MBC_SET_LED_CONFIG 0x125 /* Flash offset */ #define FLT_REG_BOOTLOAD_82XX 0x72 @@ -922,4 +921,256 @@ struct ct6_dsd { #define M25P_INSTR_DP 0xb9 #define M25P_INSTR_RES 0xab +/* Minidump related */ + +/* + * Version of the template + * 4 Bytes + * X.Major.Minor.RELEASE + */ +#define QLA82XX_MINIDUMP_VERSION 0x10101 + +/* + * Entry Type Defines + */ +#define QLA82XX_RDNOP 0 +#define QLA82XX_RDCRB 1 +#define QLA82XX_RDMUX 2 +#define QLA82XX_QUEUE 3 +#define QLA82XX_BOARD 4 +#define QLA82XX_RDSRE 5 +#define QLA82XX_RDOCM 6 +#define QLA82XX_CACHE 10 +#define QLA82XX_L1DAT 11 +#define QLA82XX_L1INS 12 +#define QLA82XX_L2DTG 21 +#define QLA82XX_L2ITG 22 +#define QLA82XX_L2DAT 23 +#define QLA82XX_L2INS 24 +#define QLA82XX_RDROM 71 +#define QLA82XX_RDMEM 72 +#define QLA82XX_CNTRL 98 +#define QLA82XX_TLHDR 99 +#define QLA82XX_RDEND 255 + +/* + * Opcodes for Control Entries. + * These Flags are bit fields. + */ +#define QLA82XX_DBG_OPCODE_WR 0x01 +#define QLA82XX_DBG_OPCODE_RW 0x02 +#define QLA82XX_DBG_OPCODE_AND 0x04 +#define QLA82XX_DBG_OPCODE_OR 0x08 +#define QLA82XX_DBG_OPCODE_POLL 0x10 +#define QLA82XX_DBG_OPCODE_RDSTATE 0x20 +#define QLA82XX_DBG_OPCODE_WRSTATE 0x40 +#define QLA82XX_DBG_OPCODE_MDSTATE 0x80 + +/* + * Template Header and Entry Header definitions start here. + */ + +/* + * Template Header + * Parts of the template header can be modified by the driver. 
+ * These include the saved_state_array, capture_debug_level, driver_timestamp + */ + +#define QLA82XX_DBG_STATE_ARRAY_LEN 16 +#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8 +#define QLA82XX_DBG_RSVD_ARRAY_LEN 8 + +/* + * Driver Flags + */ +#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */ +#define QLA82XX_DEFAULT_CAP_MASK 0xFF /* default capture mask */ + +struct qla82xx_md_template_hdr { + uint32_t entry_type; + uint32_t first_entry_offset; + uint32_t size_of_template; + uint32_t capture_debug_level; + + uint32_t num_of_entries; + uint32_t version; + uint32_t driver_timestamp; + uint32_t template_checksum; + + uint32_t driver_capture_mask; + uint32_t driver_info[3]; + + uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN]; + uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN]; + + /* markers_array used to capture some special locations on board */ + uint32_t markers_array[QLA82XX_DBG_RSVD_ARRAY_LEN]; + uint32_t num_of_free_entries; /* For internal use */ + uint32_t free_entry_offset; /* For internal use */ + uint32_t total_table_size; /* For internal use */ + uint32_t bkup_table_offset; /* For internal use */ +} __packed; + +/* + * Entry Header: Common to All Entry Types + */ + +/* + * Driver Code is for driver to write some info about the entry. + * Currently not used. + */ +typedef struct qla82xx_md_entry_hdr { + uint32_t entry_type; + uint32_t entry_size; + uint32_t entry_capture_size; + struct { + uint8_t entry_capture_mask; + uint8_t entry_code; + uint8_t driver_code; + uint8_t driver_flags; + } d_ctrl; +} __packed qla82xx_md_entry_hdr_t; + +/* + * Read CRB entry header + */ +struct qla82xx_md_entry_crb { + qla82xx_md_entry_hdr_t h; + uint32_t addr; + struct { + uint8_t addr_stride; + uint8_t state_index_a; + uint16_t poll_timeout; + } crb_strd; + + uint32_t data_size; + uint32_t op_count; + + struct { + uint8_t opcode; + uint8_t state_index_v; + uint8_t shl; + uint8_t shr; + } crb_ctrl; + + uint32_t value_1; + uint32_t value_2; + uint32_t value_3; +} __packed; + +/* + * Cache entry header + */ +struct qla82xx_md_entry_cache { + qla82xx_md_entry_hdr_t h; + + uint32_t tag_reg_addr; + struct { + uint16_t tag_value_stride; + uint16_t init_tag_value; + } addr_ctrl; + + uint32_t data_size; + uint32_t op_count; + + uint32_t control_addr; + struct { + uint16_t write_value; + uint8_t poll_mask; + uint8_t poll_wait; + } cache_ctrl; + + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_1; + } read_ctrl; +} __packed; + +/* + * Read OCM + */ +struct qla82xx_md_entry_rdocm { + qla82xx_md_entry_hdr_t h; + + uint32_t rsvd_0; + uint32_t rsvd_1; + uint32_t data_size; + uint32_t op_count; + + uint32_t rsvd_2; + uint32_t rsvd_3; + uint32_t read_addr; + uint32_t read_addr_stride; + uint32_t read_addr_cntrl; +} __packed; + +/* + * Read Memory + */ +struct qla82xx_md_entry_rdmem { + qla82xx_md_entry_hdr_t h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +/* + * Read ROM + */ +struct qla82xx_md_entry_rdrom { + qla82xx_md_entry_hdr_t h; + uint32_t rsvd[6]; + uint32_t read_addr; + uint32_t read_data_size; +} __packed; + +struct qla82xx_md_entry_mux { + qla82xx_md_entry_hdr_t h; + + uint32_t select_addr; + uint32_t rsvd_0; + uint32_t data_size; + uint32_t op_count; + + uint32_t select_value; + uint32_t select_value_stride; + uint32_t read_addr; + uint32_t rsvd_1; +} __packed; + +struct qla82xx_md_entry_queue { + qla82xx_md_entry_hdr_t h; + + uint32_t select_addr; + struct { + uint16_t 
queue_id_stride; + uint16_t rsvd_0; + } q_strd; + + uint32_t data_size; + uint32_t op_count; + uint32_t rsvd_1; + uint32_t rsvd_2; + + uint32_t read_addr; + struct { + uint8_t read_addr_stride; + uint8_t read_addr_cnt; + uint16_t rsvd_3; + } rd_strd; +} __packed; + +#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129 +#define RQST_TMPLT_SIZE 0x0 +#define RQST_TMPLT 0x1 +#define MD_DIRECT_ROM_WINDOW 0x42110030 +#define MD_DIRECT_ROM_READ_BASE 0x42150000 +#define MD_MIU_TEST_AGT_CTRL 0x41000090 +#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094 +#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098 + +static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC, + 0x410000B8, 0x410000BC }; #endif diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 1e69527f1e4e..fd14c7bfc626 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -143,7 +143,7 @@ MODULE_PARM_DESC(ql2xmultique_tag, "Set it to 1 to turn on the cpu affinity."); int ql2xfwloadbin; -module_param(ql2xfwloadbin, int, S_IRUGO); +module_param(ql2xfwloadbin, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xfwloadbin, "Option to specify location from which to load ISP firmware:.\n" " 2 -- load firmware via the request_firmware() (hotplug).\n" @@ -158,11 +158,11 @@ MODULE_PARM_DESC(ql2xetsenable, "Default is 0 - skip ETS enablement."); int ql2xdbwr = 1; -module_param(ql2xdbwr, int, S_IRUGO); +module_param(ql2xdbwr, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xdbwr, - "Option to specify scheme for request queue posting.\n" - " 0 -- Regular doorbell.\n" - " 1 -- CAMRAM doorbell (faster).\n"); + "Option to specify scheme for request queue posting.\n" + " 0 -- Regular doorbell.\n" + " 1 -- CAMRAM doorbell (faster).\n"); int ql2xtargetreset = 1; module_param(ql2xtargetreset, int, S_IRUGO); @@ -183,11 +183,11 @@ MODULE_PARM_DESC(ql2xasynctmfenable, "Default is 0 - Issue TM IOCBs via mailbox mechanism."); int ql2xdontresethba; -module_param(ql2xdontresethba, int, S_IRUGO); +module_param(ql2xdontresethba, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(ql2xdontresethba, - "Option to specify reset behaviour.\n" - " 0 (Default) -- Reset on failure.\n" - " 1 -- Do not reset on failure.\n"); + "Option to specify reset behaviour.\n" + " 0 (Default) -- Reset on failure.\n" + " 1 -- Do not reset on failure.\n"); uint ql2xmaxlun = MAX_LUNS; module_param(ql2xmaxlun, uint, S_IRUGO); @@ -195,6 +195,19 @@ MODULE_PARM_DESC(ql2xmaxlun, "Defines the maximum LU number to register with the SCSI " "midlayer. Default is 65535."); +int ql2xmdcapmask = 0x1F; +module_param(ql2xmdcapmask, int, S_IRUGO); +MODULE_PARM_DESC(ql2xmdcapmask, + "Set the Minidump driver capture mask level. " + "Default is 0x1F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F."); + +int ql2xmdenable; +module_param(ql2xmdenable, int, S_IRUGO); +MODULE_PARM_DESC(ql2xmdenable, + "Enable/disable MiniDump. " + "0 (Default) - MiniDump disabled. 
" + "1 - MiniDump enabled."); + /* * SCSI host template entry points */ @@ -1750,9 +1763,9 @@ static struct isp_operations qla82xx_isp_ops = { .read_nvram = qla24xx_read_nvram_data, .write_nvram = qla24xx_write_nvram_data, .fw_dump = qla24xx_fw_dump, - .beacon_on = qla24xx_beacon_on, - .beacon_off = qla24xx_beacon_off, - .beacon_blink = qla24xx_beacon_blink, + .beacon_on = qla82xx_beacon_on, + .beacon_off = qla82xx_beacon_off, + .beacon_blink = NULL, .read_optrom = qla82xx_read_optrom_data, .write_optrom = qla82xx_write_optrom_data, .get_flash_version = qla24xx_get_flash_version, @@ -2670,6 +2683,8 @@ qla2x00_free_device(scsi_qla_host_t *vha) qla2x00_mem_free(ha); + qla82xx_md_free(vha); + qla2x00_free_queues(ha); } @@ -3903,8 +3918,11 @@ qla2x00_timer(scsi_qla_host_t *vha) /* Check if beacon LED needs to be blinked for physical host only */ if (!vha->vp_idx && (ha->beacon_blink_led == 1)) { - set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); - start_dpc++; + /* There is no beacon_blink function for ISP82xx */ + if (!IS_QLA82XX(ha)) { + set_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags); + start_dpc++; + } } /* Process any deferred work. */ diff --git a/drivers/scsi/qla4xxx/Kconfig b/drivers/scsi/qla4xxx/Kconfig index 0f5599e0abf6..f1ad02ea212b 100644 --- a/drivers/scsi/qla4xxx/Kconfig +++ b/drivers/scsi/qla4xxx/Kconfig @@ -2,6 +2,7 @@ config SCSI_QLA_ISCSI tristate "QLogic ISP4XXX and ISP82XX host adapter family support" depends on PCI && SCSI && NET select SCSI_ISCSI_ATTRS + select ISCSI_BOOT_SYSFS ---help--- This driver supports the QLogic 40xx (ISP4XXX) and 8022 (ISP82XX) iSCSI host adapter family. diff --git a/drivers/scsi/qla4xxx/Makefile b/drivers/scsi/qla4xxx/Makefile index 252523d7847e..5b44139ff43d 100644 --- a/drivers/scsi/qla4xxx/Makefile +++ b/drivers/scsi/qla4xxx/Makefile @@ -1,5 +1,5 @@ qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \ - ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o + ql4_nx.o ql4_nvram.o ql4_dbg.o ql4_attr.o ql4_bsg.o obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c index 864d018631c0..0b0a7d42137d 100644 --- a/drivers/scsi/qla4xxx/ql4_attr.c +++ b/drivers/scsi/qla4xxx/ql4_attr.c @@ -55,15 +55,91 @@ qla4xxx_optrom_version_show(struct device *dev, struct device_attribute *attr, ha->bootload_patch, ha->bootload_build); } +static ssize_t +qla4xxx_board_id_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + return snprintf(buf, PAGE_SIZE, "0x%08X\n", ha->board_id); +} + +static ssize_t +qla4xxx_fw_state_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + qla4xxx_get_firmware_state(ha); + return snprintf(buf, PAGE_SIZE, "0x%08X%8X\n", ha->firmware_state, + ha->addl_fw_state); +} + +static ssize_t +qla4xxx_phy_port_cnt_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + if (!is_qla8022(ha)) + return -ENOSYS; + + return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_cnt); +} + +static ssize_t +qla4xxx_phy_port_num_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + if (!is_qla8022(ha)) + return -ENOSYS; + + return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->phy_port_num); +} + +static ssize_t +qla4xxx_iscsi_func_cnt_show(struct device *dev, struct 
device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + if (!is_qla8022(ha)) + return -ENOSYS; + + return snprintf(buf, PAGE_SIZE, "0x%04X\n", ha->iscsi_pci_func_cnt); +} + +static ssize_t +qla4xxx_hba_model_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct scsi_qla_host *ha = to_qla_host(class_to_shost(dev)); + + return snprintf(buf, PAGE_SIZE, "%s\n", ha->model_name); +} + static DEVICE_ATTR(fw_version, S_IRUGO, qla4xxx_fw_version_show, NULL); static DEVICE_ATTR(serial_num, S_IRUGO, qla4xxx_serial_num_show, NULL); static DEVICE_ATTR(iscsi_version, S_IRUGO, qla4xxx_iscsi_version_show, NULL); static DEVICE_ATTR(optrom_version, S_IRUGO, qla4xxx_optrom_version_show, NULL); +static DEVICE_ATTR(board_id, S_IRUGO, qla4xxx_board_id_show, NULL); +static DEVICE_ATTR(fw_state, S_IRUGO, qla4xxx_fw_state_show, NULL); +static DEVICE_ATTR(phy_port_cnt, S_IRUGO, qla4xxx_phy_port_cnt_show, NULL); +static DEVICE_ATTR(phy_port_num, S_IRUGO, qla4xxx_phy_port_num_show, NULL); +static DEVICE_ATTR(iscsi_func_cnt, S_IRUGO, qla4xxx_iscsi_func_cnt_show, NULL); +static DEVICE_ATTR(hba_model, S_IRUGO, qla4xxx_hba_model_show, NULL); struct device_attribute *qla4xxx_host_attrs[] = { &dev_attr_fw_version, &dev_attr_serial_num, &dev_attr_iscsi_version, &dev_attr_optrom_version, + &dev_attr_board_id, + &dev_attr_fw_state, + &dev_attr_phy_port_cnt, + &dev_attr_phy_port_num, + &dev_attr_iscsi_func_cnt, + &dev_attr_hba_model, NULL, }; diff --git a/drivers/scsi/qla4xxx/ql4_bsg.c b/drivers/scsi/qla4xxx/ql4_bsg.c new file mode 100644 index 000000000000..8acdc582ff6d --- /dev/null +++ b/drivers/scsi/qla4xxx/ql4_bsg.c @@ -0,0 +1,513 @@ +/* + * QLogic iSCSI HBA Driver + * Copyright (c) 2011 QLogic Corporation + * + * See LICENSE.qla4xxx for copyright and licensing details. 
+ */ + +#include "ql4_def.h" +#include "ql4_glbl.h" +#include "ql4_bsg.h" + +static int +qla4xxx_read_flash(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + struct iscsi_bsg_request *bsg_req = bsg_job->request; + uint32_t offset = 0; + uint32_t length = 0; + dma_addr_t flash_dma; + uint8_t *flash = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + if (ha->flash_state != QLFLASH_WAITING) { + ql4_printk(KERN_ERR, ha, "%s: another flash operation " + "active\n", __func__); + rval = -EBUSY; + goto leave; + } + + ha->flash_state = QLFLASH_READING; + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + length = bsg_job->reply_payload.payload_len; + + flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma, + GFP_KERNEL); + if (!flash) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + rval = qla4xxx_get_flash(ha, flash_dma, offset, length); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: get flash failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else { + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + flash, length); + bsg_reply->result = DID_OK << 16; + } + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma); +leave: + ha->flash_state = QLFLASH_WAITING; + return rval; +} + +static int +qla4xxx_update_flash(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + struct iscsi_bsg_request *bsg_req = bsg_job->request; + uint32_t length = 0; + uint32_t offset = 0; + uint32_t options = 0; + dma_addr_t flash_dma; + uint8_t *flash = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + if (ha->flash_state != QLFLASH_WAITING) { + ql4_printk(KERN_ERR, ha, "%s: another flash operation " + "active\n", __func__); + rval = -EBUSY; + goto leave; + } + + ha->flash_state = QLFLASH_WRITING; + length = bsg_job->request_payload.payload_len; + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + options = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; + + flash = dma_alloc_coherent(&ha->pdev->dev, length, &flash_dma, + GFP_KERNEL); + if (!flash) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for flash " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, flash, length); + + rval = qla4xxx_set_flash(ha, flash_dma, offset, length, options); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: set flash failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else + bsg_reply->result = DID_OK << 16; + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, length, flash, flash_dma); 
+leave: + ha->flash_state = QLFLASH_WAITING; + return rval; +} + +static int +qla4xxx_get_acb_state(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t status[MBOX_REG_COUNT]; + uint32_t acb_idx; + uint32_t ip_idx; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + /* Only 4022 and above adapters are supported */ + if (is_qla4010(ha)) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + if (bsg_job->reply_payload.payload_len < sizeof(status)) { + ql4_printk(KERN_ERR, ha, "%s: invalid payload len %d\n", + __func__, bsg_job->reply_payload.payload_len); + rval = -EINVAL; + goto leave; + } + + acb_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + ip_idx = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; + + rval = qla4xxx_get_ip_state(ha, acb_idx, ip_idx, status); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: get ip state failed\n", + __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else { + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + status, sizeof(status)); + bsg_reply->result = DID_OK << 16; + } + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +leave: + return rval; +} + +static int +qla4xxx_read_nvram(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t offset = 0; + uint32_t len = 0; + uint32_t total_len = 0; + dma_addr_t nvram_dma; + uint8_t *nvram = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + /* Only 40xx adapters are supported */ + if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha))) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + len = bsg_job->reply_payload.payload_len; + total_len = offset + len; + + /* total len should not be greater than max NVRAM size */ + if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) || + ((is_qla4022(ha) || is_qla4032(ha)) && + total_len > QL40X2_NVRAM_SIZE)) { + ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max" + " nvram size, offset=%d len=%d\n", + __func__, offset, len); + goto leave; + } + + nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma, + GFP_KERNEL); + if (!nvram) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + rval = qla4xxx_get_nvram(ha, nvram_dma, offset, len); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: get nvram failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else { + bsg_reply->reply_payload_rcv_len = + sg_copy_from_buffer(bsg_job->reply_payload.sg_list, + bsg_job->reply_payload.sg_cnt, + nvram, len); + bsg_reply->result = DID_OK << 16; + } + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, len, nvram, 
nvram_dma); +leave: + return rval; +} + +static int +qla4xxx_update_nvram(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t offset = 0; + uint32_t len = 0; + uint32_t total_len = 0; + dma_addr_t nvram_dma; + uint8_t *nvram = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + if (!(is_qla4010(ha) || is_qla4022(ha) || is_qla4032(ha))) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + offset = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + len = bsg_job->request_payload.payload_len; + total_len = offset + len; + + /* total len should not be greater than max NVRAM size */ + if ((is_qla4010(ha) && total_len > QL4010_NVRAM_SIZE) || + ((is_qla4022(ha) || is_qla4032(ha)) && + total_len > QL40X2_NVRAM_SIZE)) { + ql4_printk(KERN_ERR, ha, "%s: offset+len greater than max" + " nvram size, offset=%d len=%d\n", + __func__, offset, len); + goto leave; + } + + nvram = dma_alloc_coherent(&ha->pdev->dev, len, &nvram_dma, + GFP_KERNEL); + if (!nvram) { + ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for nvram " + "data\n", __func__); + rval = -ENOMEM; + goto leave; + } + + sg_copy_to_buffer(bsg_job->request_payload.sg_list, + bsg_job->request_payload.sg_cnt, nvram, len); + + rval = qla4xxx_set_nvram(ha, nvram_dma, offset, len); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: set nvram failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else + bsg_reply->result = DID_OK << 16; + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); + dma_free_coherent(&ha->pdev->dev, len, nvram, nvram_dma); +leave: + return rval; +} + +static int +qla4xxx_restore_defaults(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t region = 0; + uint32_t field0 = 0; + uint32_t field1 = 0; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len = 0; + + if (unlikely(pci_channel_offline(ha->pdev))) + goto leave; + + if (is_qla4010(ha)) + goto leave; + + if (ql4xxx_reset_active(ha)) { + ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__); + rval = -EBUSY; + goto leave; + } + + region = bsg_req->rqst_data.h_vendor.vendor_cmd[1]; + field0 = bsg_req->rqst_data.h_vendor.vendor_cmd[2]; + field1 = bsg_req->rqst_data.h_vendor.vendor_cmd[3]; + + rval = qla4xxx_restore_factory_defaults(ha, region, field0, field1); + if (rval) { + ql4_printk(KERN_ERR, ha, "%s: restore defaults failed\n", __func__); + bsg_reply->result = DID_ERROR << 16; + rval = -EIO; + } else + bsg_reply->result = DID_OK << 16; + + bsg_job_done(bsg_job, bsg_reply->result, + bsg_reply->reply_payload_rcv_len); +leave: + return rval; +} + +static int +qla4xxx_bsg_get_acb(struct bsg_job *bsg_job) +{ + struct Scsi_Host *host = iscsi_job_to_shost(bsg_job); + struct scsi_qla_host *ha = to_qla_host(host); + struct iscsi_bsg_request *bsg_req = bsg_job->request; + struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; + uint32_t acb_type = 0; + uint32_t len = 0; + dma_addr_t acb_dma; + uint8_t *acb = NULL; + int rval = -EINVAL; + + bsg_reply->reply_payload_rcv_len 
+static int
+qla4xxx_bsg_get_acb(struct bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	uint32_t acb_type = 0;
+	uint32_t len = 0;
+	dma_addr_t acb_dma;
+	uint8_t *acb = NULL;
+	int rval = -EINVAL;
+
+	bsg_reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		goto leave;
+
+	/* Only 4022 and above adapters are supported */
+	if (is_qla4010(ha))
+		goto leave;
+
+	if (ql4xxx_reset_active(ha)) {
+		ql4_printk(KERN_ERR, ha, "%s: reset active\n", __func__);
+		rval = -EBUSY;
+		goto leave;
+	}
+
+	acb_type = bsg_req->rqst_data.h_vendor.vendor_cmd[1];
+	len = bsg_job->reply_payload.payload_len;
+	if (len < sizeof(struct addr_ctrl_blk)) {
+		ql4_printk(KERN_ERR, ha, "%s: invalid acb len %d\n",
+			   __func__, len);
+		rval = -EINVAL;
+		goto leave;
+	}
+
+	acb = dma_alloc_coherent(&ha->pdev->dev, len, &acb_dma, GFP_KERNEL);
+	if (!acb) {
+		ql4_printk(KERN_ERR, ha, "%s: dma alloc failed for acb "
+			   "data\n", __func__);
+		rval = -ENOMEM;
+		goto leave;
+	}
+
+	rval = qla4xxx_get_acb(ha, acb_dma, acb_type, len);
+	if (rval) {
+		ql4_printk(KERN_ERR, ha, "%s: get acb failed\n", __func__);
+		bsg_reply->result = DID_ERROR << 16;
+		rval = -EIO;
+	} else {
+		bsg_reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+					    bsg_job->reply_payload.sg_cnt,
+					    acb, len);
+		bsg_reply->result = DID_OK << 16;
+	}
+
+	bsg_job_done(bsg_job, bsg_reply->result,
+		     bsg_reply->reply_payload_rcv_len);
+	dma_free_coherent(&ha->pdev->dev, len, acb, acb_dma);
+leave:
+	return rval;
+}
+
+/**
+ * qla4xxx_process_vendor_specific - handle vendor specific bsg request
+ * @job: iscsi_bsg_job to handle
+ **/
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job)
+{
+	struct iscsi_bsg_reply *bsg_reply = bsg_job->reply;
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+
+	switch (bsg_req->rqst_data.h_vendor.vendor_cmd[0]) {
+	case QLISCSI_VND_READ_FLASH:
+		return qla4xxx_read_flash(bsg_job);
+
+	case QLISCSI_VND_UPDATE_FLASH:
+		return qla4xxx_update_flash(bsg_job);
+
+	case QLISCSI_VND_GET_ACB_STATE:
+		return qla4xxx_get_acb_state(bsg_job);
+
+	case QLISCSI_VND_READ_NVRAM:
+		return qla4xxx_read_nvram(bsg_job);
+
+	case QLISCSI_VND_UPDATE_NVRAM:
+		return qla4xxx_update_nvram(bsg_job);
+
+	case QLISCSI_VND_RESTORE_DEFAULTS:
+		return qla4xxx_restore_defaults(bsg_job);
+
+	case QLISCSI_VND_GET_ACB:
+		return qla4xxx_bsg_get_acb(bsg_job);
+
+	default:
+		ql4_printk(KERN_ERR, ha, "%s: invalid BSG vendor command: "
+			   "0x%x\n", __func__, bsg_req->msgcode);
+		bsg_reply->result = (DID_ERROR << 16);
+		bsg_reply->reply_payload_rcv_len = 0;
+		bsg_job_done(bsg_job, bsg_reply->result,
+			     bsg_reply->reply_payload_rcv_len);
+		return -ENOSYS;
+	}
+}
+
+/**
+ * qla4xxx_bsg_request - handle bsg request from ISCSI transport
+ * @job: iscsi_bsg_job to handle
+ */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job)
+{
+	struct iscsi_bsg_request *bsg_req = bsg_job->request;
+	struct Scsi_Host *host = iscsi_job_to_shost(bsg_job);
+	struct scsi_qla_host *ha = to_qla_host(host);
+
+	switch (bsg_req->msgcode) {
+	case ISCSI_BSG_HST_VENDOR:
+		return qla4xxx_process_vendor_specific(bsg_job);
+
+	default:
+		ql4_printk(KERN_ERR, ha, "%s: invalid BSG command: 0x%x\n",
+			   __func__, bsg_req->msgcode);
+	}
+
+	return -ENOSYS;
+}
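Adding another vendor operation only takes a new opcode in ql4_bsg.h (added below) and one more case in the switch above. A hypothetical sketch, not part of this patch:

	#define QLISCSI_VND_GET_STATS		8	/* hypothetical opcode */

	case QLISCSI_VND_GET_STATS:
		return qla4xxx_bsg_get_stats(bsg_job);	/* hypothetical handler */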
diff --git a/drivers/scsi/qla4xxx/ql4_bsg.h b/drivers/scsi/qla4xxx/ql4_bsg.h
new file mode 100644
index 000000000000..c6a0364509fd
--- /dev/null
+++ b/drivers/scsi/qla4xxx/ql4_bsg.h
@@ -0,0 +1,19 @@
+/*
+ * QLogic iSCSI HBA Driver
+ * Copyright (c) 2011 QLogic Corporation
+ *
+ * See LICENSE.qla4xxx for copyright and licensing details.
+ */
+#ifndef __QL4_BSG_H
+#define __QL4_BSG_H
+
+/* BSG Vendor specific commands */
+#define QLISCSI_VND_READ_FLASH		1
+#define QLISCSI_VND_UPDATE_FLASH	2
+#define QLISCSI_VND_GET_ACB_STATE	3
+#define QLISCSI_VND_READ_NVRAM		4
+#define QLISCSI_VND_UPDATE_NVRAM	5
+#define QLISCSI_VND_RESTORE_DEFAULTS	6
+#define QLISCSI_VND_GET_ACB		7
+
+#endif
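The opcode lands in vendor_cmd[0] of the iscsi_bsg_request, with per-command arguments in the following slots (the handlers above read vendor_cmd[1..3]). A hedged user-space sketch of laying out such a request; the bsg device plumbing (sg_io_v4 submission) is omitted and the buffer sizing is illustrative:

	/* Sketch: build the request header for a QLISCSI_VND_READ_NVRAM call.
	 * vendor_cmd[] is a flexible array, so reserve room for it. */
	uint32_t vnd[2] = { QLISCSI_VND_READ_NVRAM, 0 /* NVRAM offset */ };
	unsigned char buf[sizeof(struct iscsi_bsg_request) + sizeof(vnd)];
	struct iscsi_bsg_request *req = (struct iscsi_bsg_request *)buf;

	memset(buf, 0, sizeof(buf));
	req->msgcode = ISCSI_BSG_HST_VENDOR;
	memcpy(req->rqst_data.h_vendor.vendor_cmd, vnd, sizeof(vnd));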
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 473c5c872b39..ace637bf254e 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -25,6 +25,7 @@
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
 #include <linux/aer.h>
+#include <linux/bsg-lib.h>
 
 #include <net/tcp.h>
 #include <scsi/scsi.h>
@@ -33,9 +34,14 @@
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_transport_iscsi.h>
+#include <scsi/scsi_bsg_iscsi.h>
+#include <scsi/scsi_netlink.h>
+#include <scsi/libiscsi.h>
 
 #include "ql4_dbg.h"
 #include "ql4_nx.h"
+#include "ql4_fw.h"
+#include "ql4_nvram.h"
 
 #ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
 #define PCI_DEVICE_ID_QLOGIC_ISP4010	0x4010
@@ -109,7 +115,7 @@
 #define MAX_BUSES		1
 #define MAX_TARGETS		MAX_DEV_DB_ENTRIES
 #define MAX_LUNS		0xffff
-#define MAX_AEN_ENTRIES		256 /* should be > EXT_DEF_MAX_AEN_QUEUE */
+#define MAX_AEN_ENTRIES		MAX_DEV_DB_ENTRIES
 #define MAX_DDB_ENTRIES		MAX_DEV_DB_ENTRIES
 #define MAX_PDU_ENTRIES		32
 #define INVALID_ENTRY		0xFFFF
@@ -166,6 +172,7 @@
 #define RELOGIN_TOV			18
 #define ISNS_DEREG_TOV			5
 #define HBA_ONLINE_TOV			30
+#define DISABLE_ACB_TOV			30
 
 #define MAX_RESET_HA_RETRIES		2
 
@@ -227,52 +234,12 @@ struct ql4_aen_log {
  * Device Database (DDB) structure
  */
 struct ddb_entry {
-	struct list_head list;	/* ddb list */
 	struct scsi_qla_host *ha;
 	struct iscsi_cls_session *sess;
 	struct iscsi_cls_conn *conn;
-	atomic_t state;		/* DDB State */
-
-	unsigned long flags;	/* DDB Flags */
-
 	uint16_t fw_ddb_index;	/* DDB firmware index */
-	uint16_t options;
 	uint32_t fw_ddb_device_state; /* F/W Device State  -- see ql4_fw.h */
-
-	uint32_t CmdSn;
-	uint16_t target_session_id;
-	uint16_t connection_id;
-	uint16_t exe_throttle;	/* Max mumber of cmds outstanding
-				 * simultaneously */
-	uint16_t task_mgmt_timeout; /* Min time for task mgmt cmds to
-				     * complete */
-	uint16_t default_relogin_timeout; /* Max time to wait for
-					   * relogin to complete */
-	uint16_t tcp_source_port_num;
-	uint32_t default_time2wait; /* Default Min time between
-				     * relogins (+aens) */
-
-	atomic_t retry_relogin_timer; /* Min Time between relogins
-				       * (4000 only) */
-	atomic_t relogin_timer;	/* Max Time to wait for relogin to complete */
-	atomic_t relogin_retry_count; /* Num of times relogin has been
-				       * retried */
-
-	uint16_t port;
-	uint32_t tpgt;
-	uint8_t ip_addr[IP_ADDR_LEN];
-	uint8_t iscsi_name[ISCSI_NAME_SIZE];	/* 72 x48 */
-	uint8_t iscsi_alias[0x20];
-	uint8_t isid[6];
-	uint16_t iscsi_max_burst_len;
-	uint16_t iscsi_max_outsnd_r2t;
-	uint16_t iscsi_first_burst_len;
-	uint16_t iscsi_max_rcv_data_seg_len;
-	uint16_t iscsi_max_snd_data_seg_len;
-
-	struct in6_addr remote_ipv6_addr;
-	struct in6_addr link_local_ipv6_addr;
 };
 
 /*
@@ -293,8 +260,6 @@ struct ddb_entry {
 
 #define DF_FO_MASKED		3
 
-#include "ql4_fw.h"
-#include "ql4_nvram.h"
 
 struct ql82xx_hw_data {
	/* Offsets for flash/nvram access (set to ~0 if not used). */
@@ -312,7 +277,10 @@ struct ql82xx_hw_data {
 	uint32_t flt_region_boot;
 	uint32_t flt_region_bootload;
 	uint32_t flt_region_fw;
-	uint32_t reserved;
+
+	uint32_t flt_iscsi_param;
+	uint32_t flt_region_chap;
+	uint32_t flt_chap_size;
 };
 
 struct qla4_8xxx_legacy_intr_set {
@@ -357,6 +325,68 @@ struct isp_operations {
 	int (*get_sys_info) (struct scsi_qla_host *);
 };
 
+/*qla4xxx ipaddress configuration details */
+struct ipaddress_config {
+	uint16_t ipv4_options;
+	uint16_t tcp_options;
+	uint16_t ipv4_vlan_tag;
+	uint8_t ipv4_addr_state;
+	uint8_t ip_address[IP_ADDR_LEN];
+	uint8_t subnet_mask[IP_ADDR_LEN];
+	uint8_t gateway[IP_ADDR_LEN];
+	uint32_t ipv6_options;
+	uint32_t ipv6_addl_options;
+	uint8_t ipv6_link_local_state;
+	uint8_t ipv6_addr0_state;
+	uint8_t ipv6_addr1_state;
+	uint8_t ipv6_default_router_state;
+	uint16_t ipv6_vlan_tag;
+	struct in6_addr ipv6_link_local_addr;
+	struct in6_addr ipv6_addr0;
+	struct in6_addr ipv6_addr1;
+	struct in6_addr ipv6_default_router_addr;
+	uint16_t eth_mtu_size;
+	uint16_t ipv4_port;
+	uint16_t ipv6_port;
+};
+
+#define QL4_CHAP_MAX_NAME_LEN 256
+#define QL4_CHAP_MAX_SECRET_LEN 100
+#define LOCAL_CHAP	0
+#define BIDI_CHAP	1
+
+struct ql4_chap_format {
+	u8 intr_chap_name[QL4_CHAP_MAX_NAME_LEN];
+	u8 intr_secret[QL4_CHAP_MAX_SECRET_LEN];
+	u8 target_chap_name[QL4_CHAP_MAX_NAME_LEN];
+	u8 target_secret[QL4_CHAP_MAX_SECRET_LEN];
+	u16 intr_chap_name_length;
+	u16 intr_secret_length;
+	u16 target_chap_name_length;
+	u16 target_secret_length;
+};
+
+struct ip_address_format {
+	u8 ip_type;
+	u8 ip_address[16];
+};
+
+struct ql4_conn_info {
+	u16 dest_port;
+	struct ip_address_format dest_ipaddr;
+	struct ql4_chap_format chap;
+};
+
+struct ql4_boot_session_info {
+	u8 target_name[224];
+	struct ql4_conn_info conn_list[1];
+};
+
+struct ql4_boot_tgt_info {
+	struct ql4_boot_session_info boot_pri_sess;
+	struct ql4_boot_session_info boot_sec_sess;
+};
+
 /*
  * Linux Host Adapter structure
  */
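The slimmed ddb_entry above drops the driver's private relogin timers, state atomics, and cached login parameters: that state now lives in the libiscsi session and connection objects, and the driver-private part hangs off the session's dd_data. The lookup chain, as used verbatim by qla4xxx_send_passthru0() at the end of this patch (task_to_ha() is an illustrative helper name, not part of the patch):

	/* The new ownership chain: the libiscsi session's dd_data is the
	 * driver's ddb_entry, which points back at the adapter. */
	static struct scsi_qla_host *task_to_ha(struct iscsi_task *task)
	{
		struct iscsi_session *sess = task->conn->session;
		struct ddb_entry *ddb_entry = sess->dd_data;

		return ddb_entry->ha;
	}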
@@ -451,10 +481,6 @@ struct scsi_qla_host {
 	/* --- From Init_FW --- */
 	/* init_cb_t *init_cb; */
 	uint16_t firmware_options;
-	uint16_t tcp_options;
-	uint8_t ip_address[IP_ADDR_LEN];
-	uint8_t subnet_mask[IP_ADDR_LEN];
-	uint8_t gateway[IP_ADDR_LEN];
 	uint8_t alias[32];
 	uint8_t name_string[256];
 	uint8_t heartbeat_interval;
@@ -462,7 +488,7 @@ struct scsi_qla_host {
 	/* --- From FlashSysInfo --- */
 	uint8_t my_mac[MAC_ADDR_LEN];
 	uint8_t serial_number[16];
-
+	uint16_t port_num;
 	/* --- From GetFwState --- */
 	uint32_t firmware_state;
 	uint32_t addl_fw_state;
@@ -524,31 +550,13 @@ struct scsi_qla_host {
 	volatile uint8_t mbox_status_count;
 	volatile uint32_t mbox_status[MBOX_REG_COUNT];
 
-	/* local device database list (contains internal ddb entries) */
-	struct list_head ddb_list;
-
-	/* Map ddb_list entry by FW ddb index */
+	/* FW ddb index map */
 	struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
 
 	/* Saved srb for status continuation entry processing */
 	struct srb *status_srb;
 
-	/* IPv6 support info from InitFW */
 	uint8_t acb_version;
-	uint8_t ipv4_addr_state;
-	uint16_t ipv4_options;
-
-	uint32_t resvd2;
-	uint32_t ipv6_options;
-	uint32_t ipv6_addl_options;
-	uint8_t ipv6_link_local_state;
-	uint8_t ipv6_addr0_state;
-	uint8_t ipv6_addr1_state;
-	uint8_t ipv6_default_router_state;
-	struct in6_addr ipv6_link_local_addr;
-	struct in6_addr ipv6_addr0;
-	struct in6_addr ipv6_addr1;
-	struct in6_addr ipv6_default_router_addr;
 
 	/* qla82xx specific fields */
 	struct device_reg_82xx __iomem *qla4_8xxx_reg; /* Base I/O address */
@@ -584,6 +592,11 @@ struct scsi_qla_host {
 
 	struct completion mbx_intr_comp;
 
+	struct ipaddress_config ip_config;
+	struct iscsi_iface *iface_ipv4;
+	struct iscsi_iface *iface_ipv6_0;
+	struct iscsi_iface *iface_ipv6_1;
+
 	/* --- From About Firmware --- */
 	uint16_t iscsi_major;
 	uint16_t iscsi_minor;
@@ -591,16 +604,60 @@ struct scsi_qla_host {
 	uint16_t bootload_minor;
 	uint16_t bootload_patch;
 	uint16_t bootload_build;
+
+	uint32_t flash_state;
+#define	QLFLASH_WAITING		0
+#define	QLFLASH_READING		1
+#define	QLFLASH_WRITING		2
+	struct dma_pool *chap_dma_pool;
+	uint8_t *chap_list; /* CHAP table cache */
+	struct mutex chap_sem;
+#define CHAP_DMA_BLOCK_SIZE	512
+	struct workqueue_struct *task_wq;
+	unsigned long ddb_idx_map[MAX_DDB_ENTRIES / BITS_PER_LONG];
+#define SYSFS_FLAG_FW_SEL_BOOT	2
+	struct iscsi_boot_kset *boot_kset;
+	struct ql4_boot_tgt_info boot_tgt;
+	uint16_t phy_port_num;
+	uint16_t phy_port_cnt;
+	uint16_t iscsi_pci_func_cnt;
+	uint8_t model_name[16];
+	struct completion disable_acb_comp;
+};
+
+struct ql4_task_data {
+	struct scsi_qla_host *ha;
+	uint8_t iocb_req_cnt;
+	dma_addr_t data_dma;
+	void *req_buffer;
+	dma_addr_t req_dma;
+	uint32_t req_len;
+	void *resp_buffer;
+	dma_addr_t resp_dma;
+	uint32_t resp_len;
+	struct iscsi_task *task;
+	struct passthru_status sts;
+	struct work_struct task_work;
+};
+
+struct qla_endpoint {
+	struct Scsi_Host *host;
+	struct sockaddr dst_addr;
+};
+
+struct qla_conn {
+	struct qla_endpoint *qla_ep;
 };
 
 static inline int is_ipv4_enabled(struct scsi_qla_host *ha)
 {
-	return ((ha->ipv4_options & IPOPT_IPv4_PROTOCOL_ENABLE) != 0);
+	return ((ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) != 0);
 }
 
 static inline int is_ipv6_enabled(struct scsi_qla_host *ha)
 {
-	return ((ha->ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
+	return ((ha->ip_config.ipv6_options &
+		 IPV6_OPT_IPV6_PROTOCOL_ENABLE) != 0);
 }
 
 static inline int is_qla4010(struct scsi_qla_host *ha)
@@ -618,6 +675,11 @@ static inline int is_qla4032(struct scsi_qla_host *ha)
 	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4032;
 }
 
+static inline int is_qla40XX(struct scsi_qla_host *ha)
+{
+	return is_qla4032(ha) || is_qla4022(ha) || is_qla4010(ha);
+}
+
 static inline int is_qla8022(struct scsi_qla_host *ha)
 {
 	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP8022;
@@ -640,7 +702,7 @@ static inline int adapter_up(struct scsi_qla_host *ha)
 
 static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
 {
-	return (struct scsi_qla_host *)shost->hostdata;
+	return (struct scsi_qla_host *)iscsi_host_priv(shost);
 }
 
 static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
@@ -760,6 +822,16 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
 	ql4xxx_sem_unlock(a, QL4022_DRVR_SEM_MASK);
 }
 
+static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
+{
+	return test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) ||
+	       test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
+	       test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags) ||
+	       test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) ||
+	       test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags) ||
+	       test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+
+}
 /*---------------------------------------------------------------------------*/
 
 /* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
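Note the to_qla_host() change above: the private area is now obtained with iscsi_host_priv() instead of shost->hostdata, which assumes the Scsi_Host is allocated through the iSCSI transport. A sketch of the assumed pairing; the template name and flag value here are illustrative:

	/* Sketch: host allocation that makes iscsi_host_priv() valid. */
	struct Scsi_Host *host;
	struct scsi_qla_host *ha;

	host = iscsi_host_alloc(&qla4xxx_driver_template,
				sizeof(struct scsi_qla_host), 0);
	if (!host)
		return -ENOMEM;
	ha = to_qla_host(host);		/* == iscsi_host_priv(host) */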
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 01082aa77098..cbd5a20dbbd1 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -146,6 +146,13 @@ struct isp_reg {
 #define QL4022_NVRAM_SEM_MASK	(QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
 #define QL4022_FLASH_SEM_MASK	(QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
 
+/* nvram address for 4032 */
+#define NVRAM_PORT0_BOOT_MODE		0x03b1
+#define NVRAM_PORT0_BOOT_PRI_TGT	0x03b2
+#define NVRAM_PORT0_BOOT_SEC_TGT	0x03bb
+#define NVRAM_PORT1_BOOT_MODE		0x07b1
+#define NVRAM_PORT1_BOOT_PRI_TGT	0x07b2
+#define NVRAM_PORT1_BOOT_SEC_TGT	0x07bb
 
 /* Page # defines for 4022 */
 
@@ -194,6 +201,9 @@ static inline uint32_t clr_rmask(uint32_t val)
 /* ISP 4022 nvram definitions */
 #define NVR_WRITE_ENABLE	0x00000010	/* 4022 */
 
+#define QL4010_NVRAM_SIZE	0x200
+#define QL40X2_NVRAM_SIZE	0x800
+
 /* ISP port_status definitions */
 
 /* ISP Semaphore definitions */
@@ -241,6 +251,8 @@ union external_hw_config_reg {
 #define FA_BOOT_CODE_ADDR_82	0x20000
 #define FA_RISC_CODE_ADDR_82	0x40000
 #define FA_GOLD_RISC_CODE_ADDR_82	0x80000
+#define FA_FLASH_ISCSI_CHAP	0x540000
+#define FA_FLASH_CHAP_SIZE	0xC0000
 
 /* Flash Description Table */
 struct qla_fdt_layout {
@@ -296,8 +308,11 @@ struct qla_flt_header {
 #define FLT_REG_FLT		0x1c
 #define FLT_REG_BOOTLOAD_82	0x72
 #define FLT_REG_FW_82		0x74
+#define FLT_REG_FW_82_1		0x97
 #define FLT_REG_GOLD_FW_82	0x75
 #define FLT_REG_BOOT_CODE_82	0x78
+#define FLT_REG_ISCSI_PARAM	0x65
+#define FLT_REG_ISCSI_CHAP	0x63
 
 struct qla_flt_region {
 	uint32_t code;
@@ -331,9 +346,11 @@ struct qla_flt_region {
 #define MBOX_CMD_WRITE_FLASH			0x0025
 #define MBOX_CMD_READ_FLASH			0x0026
 #define MBOX_CMD_CLEAR_DATABASE_ENTRY		0x0031
+#define MBOX_CMD_CONN_OPEN			0x0074
 #define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT		0x0056
-#define LOGOUT_OPTION_CLOSE_SESSION		0x01
-#define LOGOUT_OPTION_RELOGIN			0x02
+#define LOGOUT_OPTION_CLOSE_SESSION		0x0002
+#define LOGOUT_OPTION_RELOGIN			0x0004
+#define LOGOUT_OPTION_FREE_DDB			0x0008
 #define MBOX_CMD_EXECUTE_IOCB_A64		0x005A
 #define MBOX_CMD_INITIALIZE_FIRMWARE		0x0060
 #define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK		0x0061
@@ -342,12 +359,15 @@ struct qla_flt_region {
 #define MBOX_CMD_GET_DATABASE_ENTRY		0x0064
 #define DDB_DS_UNASSIGNED			0x00
 #define DDB_DS_NO_CONNECTION_ACTIVE		0x01
+#define DDB_DS_DISCOVERY			0x02
 #define DDB_DS_SESSION_ACTIVE			0x04
 #define DDB_DS_SESSION_FAILED			0x06
 #define DDB_DS_LOGIN_IN_PROCESS			0x07
 #define MBOX_CMD_GET_FW_STATE			0x0069
 #define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
 #define MBOX_CMD_GET_SYS_INFO			0x0078
+#define MBOX_CMD_GET_NVRAM			0x0078 /* For 40xx */
+#define MBOX_CMD_SET_NVRAM			0x0079 /* For 40xx */
 #define MBOX_CMD_RESTORE_FACTORY_DEFAULTS	0x0087
 #define MBOX_CMD_SET_ACB			0x0088
 #define MBOX_CMD_GET_ACB			0x0089
@@ -375,7 +395,10 @@ struct qla_flt_region {
 #define FW_ADDSTATE_DHCPv4_LEASE_EXPIRED	0x0008
 #define FW_ADDSTATE_LINK_UP			0x0010
 #define FW_ADDSTATE_ISNS_SVC_ENABLED		0x0020
+
 #define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS	0x006B
+#define IPV6_DEFAULT_DDB_ENTRY			0x0001
+
 #define MBOX_CMD_CONN_OPEN_SESS_LOGIN		0x0074
 #define MBOX_CMD_GET_CRASH_RECORD		0x0076 /* 4010 only */
 #define MBOX_CMD_GET_CONN_EVENT_LOG		0x0077
@@ -434,6 +457,14 @@ struct qla_flt_region {
 #define ACB_STATE_VALID				0x05
 #define ACB_STATE_DISABLING			0x06
 
+/* FLASH offsets */
+#define FLASH_SEGMENT_IFCB			0x04000000
+
+#define FLASH_OPT_RMW_HOLD			0
+#define FLASH_OPT_RMW_INIT			1
+#define FLASH_OPT_COMMIT			2
+#define FLASH_OPT_RMW_COMMIT			3
+
 /*************************************************************************/
 
 /* Host Adapter Initialization Control Block (from host) */
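The LOGOUT_OPTION_* values above change from an enumeration (0x01/0x02) to discrete flag bits and gain LOGOUT_OPTION_FREE_DDB, so a logout can release its firmware DDB index in the same mailbox call. A sketch using the qla4xxx_session_logout_ddb() prototype declared later in ql4_glbl.h; the option combination is illustrative:

	/* Sketch: close the session and free its firmware DDB in one call. */
	int options = LOGOUT_OPTION_CLOSE_SESSION | LOGOUT_OPTION_FREE_DDB;

	if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) != QLA_SUCCESS)
		ql4_printk(KERN_ERR, ha, "logout of DDB %d failed\n",
			   ddb_entry->fw_ddb_index);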
@@ -455,7 +486,8 @@ struct addr_ctrl_blk {
 	uint8_t res0;	/* 07 */
 	uint16_t eth_mtu_size;	/* 08-09 */
 	uint16_t add_fw_options;	/* 0A-0B */
-#define SERIALIZE_TASK_MGMT		0x0400
+#define ADFWOPT_SERIALIZE_TASK_MGMT	0x0400
+#define ADFWOPT_AUTOCONN_DISABLE	0x0002
 
 	uint8_t hb_interval;	/* 0C */
 	uint8_t inst_num;	/* 0D */
@@ -473,8 +505,10 @@ struct addr_ctrl_blk {
 
 	uint16_t iscsi_opts;	/* 30-31 */
 	uint16_t ipv4_tcp_opts;	/* 32-33 */
+#define TCPOPT_DHCP_ENABLE		0x0200
 	uint16_t ipv4_ip_opts;	/* 34-35 */
-#define IPOPT_IPv4_PROTOCOL_ENABLE	0x8000
+#define IPOPT_IPV4_PROTOCOL_ENABLE	0x8000
+#define IPOPT_VLAN_TAGGING_ENABLE	0x2000
 
 	uint16_t iscsi_max_pdu_size;	/* 36-37 */
 	uint8_t ipv4_tos;	/* 38 */
@@ -526,6 +560,7 @@ struct addr_ctrl_blk {
 	uint16_t ipv6_port;	/* 204-205 */
 	uint16_t ipv6_opts;	/* 206-207 */
 #define IPV6_OPT_IPV6_PROTOCOL_ENABLE	0x8000
+#define IPV6_OPT_VLAN_TAGGING_ENABLE	0x2000
 
 	uint16_t ipv6_addtl_opts;	/* 208-209 */
 #define IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE	0x0002 /* Pri ACB
@@ -574,13 +609,105 @@ struct init_fw_ctrl_blk {
 /*	struct addr_ctrl_blk	sec;*/
 };
 
+#define PRIMARI_ACB		0
+#define SECONDARY_ACB		1
+
+struct addr_ctrl_blk_def {
+	uint8_t reserved1[1];	/* 00 */
+	uint8_t control;	/* 01 */
+	uint8_t reserved2[11];	/* 02-0C */
+	uint8_t inst_num;	/* 0D */
+	uint8_t reserved3[34];	/* 0E-2F */
+	uint16_t iscsi_opts;	/* 30-31 */
+	uint16_t ipv4_tcp_opts;	/* 32-33 */
+	uint16_t ipv4_ip_opts;	/* 34-35 */
+	uint16_t iscsi_max_pdu_size;	/* 36-37 */
+	uint8_t ipv4_tos;	/* 38 */
+	uint8_t ipv4_ttl;	/* 39 */
+	uint8_t reserved4[2];	/* 3A-3B */
+	uint16_t def_timeout;	/* 3C-3D */
+	uint16_t iscsi_fburst_len;	/* 3E-3F */
+	uint8_t reserved5[4];	/* 40-43 */
+	uint16_t iscsi_max_outstnd_r2t;	/* 44-45 */
+	uint8_t reserved6[2];	/* 46-47 */
+	uint16_t ipv4_port;	/* 48-49 */
+	uint16_t iscsi_max_burst_len;	/* 4A-4B */
+	uint8_t reserved7[4];	/* 4C-4F */
+	uint8_t ipv4_addr[4];	/* 50-53 */
+	uint16_t ipv4_vlan_tag;	/* 54-55 */
+	uint8_t ipv4_addr_state;	/* 56 */
+	uint8_t ipv4_cacheid;	/* 57 */
+	uint8_t reserved8[8];	/* 58-5F */
+	uint8_t ipv4_subnet[4];	/* 60-63 */
+	uint8_t reserved9[12];	/* 64-6F */
+	uint8_t ipv4_gw_addr[4];	/* 70-73 */
+	uint8_t reserved10[84];	/* 74-C7 */
+	uint8_t abort_timer;	/* C8 */
+	uint8_t ipv4_tcp_wsf;	/* C9 */
+	uint8_t reserved11[10];	/* CA-D3 */
+	uint8_t ipv4_dhcp_vid_len;	/* D4 */
+	uint8_t ipv4_dhcp_vid[11];	/* D5-DF */
+	uint8_t reserved12[20];	/* E0-F3 */
+	uint8_t ipv4_dhcp_alt_cid_len;	/* F4 */
+	uint8_t ipv4_dhcp_alt_cid[11];	/* F5-FF */
+	uint8_t iscsi_name[224];	/* 100-1DF */
+	uint8_t reserved13[32];	/* 1E0-1FF */
+	uint32_t cookie;	/* 200-203 */
+	uint16_t ipv6_port;	/* 204-205 */
+	uint16_t ipv6_opts;	/* 206-207 */
+	uint16_t ipv6_addtl_opts;	/* 208-209 */
+	uint16_t ipv6_tcp_opts;	/* 20A-20B */
+	uint8_t ipv6_tcp_wsf;	/* 20C */
+	uint16_t ipv6_flow_lbl;	/* 20D-20F */
+	uint8_t ipv6_dflt_rtr_addr[16];	/* 210-21F */
+	uint16_t ipv6_vlan_tag;	/* 220-221 */
+	uint8_t ipv6_lnk_lcl_addr_state;	/* 222 */
+	uint8_t ipv6_addr0_state;	/* 223 */
+	uint8_t ipv6_addr1_state;	/* 224 */
+	uint8_t ipv6_dflt_rtr_state;	/* 225 */
+	uint8_t ipv6_traffic_class;	/* 226 */
+	uint8_t ipv6_hop_limit;	/* 227 */
+	uint8_t ipv6_if_id[8];	/* 228-22F */
+	uint8_t ipv6_addr0[16];	/* 230-23F */
+	uint8_t ipv6_addr1[16];	/* 240-24F */
+	uint32_t ipv6_nd_reach_time;	/* 250-253 */
+	uint32_t ipv6_nd_rexmit_timer;	/* 254-257 */
+	uint32_t ipv6_nd_stale_timeout;	/* 258-25B */
+	uint8_t ipv6_dup_addr_detect_count;	/* 25C */
+	uint8_t ipv6_cache_id;	/* 25D */
+	uint8_t reserved14[18];	/* 25E-26F */
+	uint32_t ipv6_gw_advrt_mtu;	/* 270-273 */
+	uint8_t reserved15[140];	/* 274-2FF */
+};
+
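addr_ctrl_blk_def mirrors addr_ctrl_blk field for field and is the layout the ACB get/set mailbox pair works on. A sketch of a read-modify-write pass over the primary ACB using the qla4xxx_get_acb()/qla4xxx_set_acb() prototypes from ql4_glbl.h; mailbox array handling is abbreviated and error paths trimmed:

	/* Sketch: fetch the primary ACB, set an option bit, write it back. */
	struct addr_ctrl_blk *acb;
	dma_addr_t acb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT], mbox_sts[MBOX_REG_COUNT];

	acb = dma_alloc_coherent(&ha->pdev->dev, sizeof(*acb), &acb_dma,
				 GFP_KERNEL);
	if (!acb)
		return -ENOMEM;

	if (qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, sizeof(*acb)) ==
	    QLA_SUCCESS) {
		acb->ipv4_ip_opts |= cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE);
		memset(mbox_cmd, 0, sizeof(mbox_cmd));
		memset(mbox_sts, 0, sizeof(mbox_sts));
		qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma);
	}
	dma_free_coherent(&ha->pdev->dev, sizeof(*acb), acb, acb_dma);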
 /*************************************************************************/
 
+#define MAX_CHAP_ENTRIES_40XX	128
+#define MAX_CHAP_ENTRIES_82XX	1024
+#define MAX_RESRV_CHAP_IDX	3
+#define FLASH_CHAP_OFFSET	0x06000000
+
+struct ql4_chap_table {
+	uint16_t link;
+	uint8_t flags;
+	uint8_t secret_len;
+#define MIN_CHAP_SECRET_LEN	12
+#define MAX_CHAP_SECRET_LEN	100
+	uint8_t secret[MAX_CHAP_SECRET_LEN];
+#define MAX_CHAP_NAME_LEN	256
+	uint8_t name[MAX_CHAP_NAME_LEN];
+	uint16_t reserved;
+#define CHAP_VALID_COOKIE	0x4092
+#define CHAP_INVALID_COOKIE	0xFFEE
+	uint16_t cookie;
+};
+
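CHAP entries are stored in flash as an array of these ql4_chap_table records, and a slot is only meaningful while its cookie reads CHAP_VALID_COOKIE. A sketch of validating one slot from the driver's cached copy (ha->chap_list, added to scsi_qla_host above); bounds checking and locking via ha->chap_sem are omitted:

	/* Sketch: check slot 'idx' of the cached CHAP table before use. */
	struct ql4_chap_table *chap_table =
		(struct ql4_chap_table *)ha->chap_list + idx;

	if (le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE)
		return -EINVAL;	/* free or never-programmed slot */
	if (chap_table->secret_len < MIN_CHAP_SECRET_LEN)
		return -EINVAL;	/* secret too short to be valid */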
 struct dev_db_entry {
 	uint16_t options;	/* 00-01 */
 #define DDB_OPT_DISC_SESSION  0x10
 #define DDB_OPT_TARGET	      0x02 /* device is a target */
 #define DDB_OPT_IPV6_DEVICE	0x100
+#define DDB_OPT_AUTO_SENDTGTS_DISABLE		0x40
 #define DDB_OPT_IPV6_NULL_LINK_LOCAL		0x800 /* post connection */
 #define DDB_OPT_IPV6_FW_DEFINED_LINK_LOCAL	0x800 /* pre connection */
 
@@ -591,6 +718,7 @@ struct dev_db_entry {
 	uint16_t tcp_options;	/* 0A-0B */
 	uint16_t ip_options;	/* 0C-0D */
 	uint16_t iscsi_max_rcv_data_seg_len;	/* 0E-0F */
+#define	BYTE_UNITS	512
 	uint32_t res1;	/* 10-13 */
 	uint16_t iscsi_max_snd_data_seg_len;	/* 14-15 */
 	uint16_t iscsi_first_burst_len;	/* 16-17 */
@@ -627,7 +755,10 @@ struct dev_db_entry {
 	uint8_t tcp_rcv_wsf;	/* 1C7 */
 	uint32_t stat_sn;	/* 1C8-1CB */
 	uint32_t exp_stat_sn;	/* 1CC-1CF */
-	uint8_t res6[0x30];	/* 1D0-1FF */
+	uint8_t res6[0x2b];	/* 1D0-1FB */
+#define DDB_VALID_COOKIE	0x9034
+	uint16_t cookie;	/* 1FC-1FD */
+	uint16_t len;		/* 1FE-1FF */
 };
 
 /*************************************************************************/
@@ -639,6 +770,14 @@ struct dev_db_entry {
 #define FLASH_EOF_OFFSET	(FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
							    * for EOF
							    * signature */
+#define FLASH_RAW_ACCESS_ADDR	0x8e000000
+
+#define BOOT_PARAM_OFFSET_PORT0 0x3b0
+#define BOOT_PARAM_OFFSET_PORT1 0x7b0
+
+#define FLASH_OFFSET_DB_INFO	0x05000000
+#define FLASH_OFFSET_DB_END	(FLASH_OFFSET_DB_INFO + 0x7fff)
+
 
 struct sys_info_phys_addr {
 	uint8_t address[6];	/* 00-05 */
@@ -774,6 +913,7 @@ struct qla4_header {
 	uint8_t entryStatus;
 	uint8_t systemDefined;
+#define SD_ISCSI_PDU	0x01
 	uint8_t entryCount;
 
 	/* SystemDefined definition */
@@ -931,21 +1071,22 @@ struct passthru0 {
 	struct qla4_header hdr;		       /* 00-03 */
 	uint32_t handle;	/* 04-07 */
 	uint16_t target;	/* 08-09 */
-	uint16_t connectionID;	/* 0A-0B */
+	uint16_t connection_id;	/* 0A-0B */
 #define ISNS_DEFAULT_SERVER_CONN_ID	((uint16_t)0x8000)
 
-	uint16_t controlFlags;	/* 0C-0D */
+	uint16_t control_flags;	/* 0C-0D */
 #define PT_FLAG_ETHERNET_FRAME		0x8000
 #define PT_FLAG_ISNS_PDU		0x8000
 #define PT_FLAG_SEND_BUFFER		0x0200
 #define PT_FLAG_WAIT_4_RESPONSE		0x0100
+#define PT_FLAG_ISCSI_PDU		0x1000
 
 	uint16_t timeout;	/* 0E-0F */
 #define PT_DEFAULT_TIMEOUT		30 /* seconds */
 
-	struct data_seg_a64 outDataSeg64;	/* 10-1B */
+	struct data_seg_a64 out_dsd;	/* 10-1B */
 	uint32_t res1;		/* 1C-1F */
 
-	struct data_seg_a64 inDataSeg64;	/* 20-2B */
+	struct data_seg_a64 in_dsd;	/* 20-2B */
 	uint8_t res2[20];	/* 2C-3F */
 };
 
@@ -978,4 +1119,43 @@ struct response {
 #define RESPONSE_PROCESSED	0xDEADDEAD	/* Signature */
 };
 
+struct ql_iscsi_stats {
+	uint8_t reserved1[656]; /* 0000-028F */
+	uint32_t tx_cmd_pdu; /* 0290-0293 */
+	uint32_t tx_resp_pdu; /* 0294-0297 */
+	uint32_t rx_cmd_pdu; /* 0298-029B */
+	uint32_t rx_resp_pdu; /* 029C-029F */
+
+	uint64_t tx_data_octets; /* 02A0-02A7 */
+	uint64_t rx_data_octets; /* 02A8-02AF */
+
+	uint32_t hdr_digest_err; /* 02B0-02B3 */
+	uint32_t data_digest_err; /* 02B4-02B7 */
+	uint32_t conn_timeout_err; /* 02B8-02BB */
+	uint32_t framing_err; /* 02BC-02BF */
+
+	uint32_t tx_nopout_pdus; /* 02C0-02C3 */
+	uint32_t tx_scsi_cmd_pdus; /* 02C4-02C7 */
+	uint32_t tx_tmf_cmd_pdus; /* 02C8-02CB */
+	uint32_t tx_login_cmd_pdus; /* 02CC-02CF */
+	uint32_t tx_text_cmd_pdus; /* 02D0-02D3 */
+	uint32_t tx_scsi_write_pdus; /* 02D4-02D7 */
+	uint32_t tx_logout_cmd_pdus; /* 02D8-02DB */
+	uint32_t tx_snack_req_pdus; /* 02DC-02DF */
+
+	uint32_t rx_nopin_pdus; /* 02E0-02E3 */
+	uint32_t rx_scsi_resp_pdus; /* 02E4-02E7 */
+	uint32_t rx_tmf_resp_pdus; /* 02E8-02EB */
+	uint32_t rx_login_resp_pdus; /* 02EC-02EF */
+	uint32_t rx_text_resp_pdus; /* 02F0-02F3 */
+	uint32_t rx_scsi_read_pdus; /* 02F4-02F7 */
+	uint32_t rx_logout_resp_pdus; /* 02F8-02FB */
+
+	uint32_t rx_r2t_pdus; /* 02FC-02FF */
+	uint32_t rx_async_pdus; /* 0300-0303 */
+	uint32_t rx_reject_pdus; /* 0304-0307 */
+
+	uint8_t reserved2[264]; /* 0x0308 - 0x040F */
+};
+
 #endif	/* _QLA4X_FW_H */
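ql_iscsi_stats is the DMA layout the firmware fills for the management-data request; the matching qla4xxx_get_mgmt_data() prototype is declared in ql4_glbl.h just below. A sketch of reading one per-DDB counter (error handling trimmed; the counters are little-endian):

	/* Sketch: pull the firmware statistics block for one DDB. */
	struct ql_iscsi_stats *stats;
	dma_addr_t stats_dma;

	stats = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stats),
				   &stats_dma, GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	if (qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, sizeof(*stats),
				  stats_dma) == QLA_SUCCESS)
		ql4_printk(KERN_INFO, ha, "tx cmd PDUs: %u\n",
			   le32_to_cpu(stats->tx_cmd_pdu));

	dma_free_coherent(&ha->pdev->dev, sizeof(*stats), stats, stats_dma);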
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index a53a256c1f8d..160db9d5ea21 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -12,20 +12,15 @@ struct iscsi_cls_conn;
 
 int qla4xxx_hw_reset(struct scsi_qla_host *ha);
 int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
-int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
 int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
-			       uint8_t renew_ddb_list);
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
 int qla4xxx_soft_reset(struct scsi_qla_host *ha);
 irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);
 
-void qla4xxx_free_ddb_list(struct scsi_qla_host *ha);
 void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry);
 void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen);
 
 int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host *ha);
-int qla4xxx_relogin_device(struct scsi_qla_host *ha,
-			   struct ddb_entry *ddb_entry);
 int qla4xxx_abort_task(struct scsi_qla_host *ha, struct srb *srb);
 int qla4xxx_reset_lun(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry,
 		      int lun);
@@ -51,15 +46,24 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
			    uint16_t *connection_id);
 
 int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
-			  dma_addr_t fw_ddb_entry_dma);
-
-void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
-				 struct ddb_entry *ddb_entry);
+			  dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts);
+uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+			 uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma);
+int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha,
+				   uint16_t fw_ddb_index,
+				   uint16_t connection_id,
+				   uint16_t option);
+int qla4xxx_disable_acb(struct scsi_qla_host *ha);
+int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd,
+		    uint32_t *mbox_sts, dma_addr_t acb_dma);
+int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma,
+		    uint32_t acb_type, uint32_t len);
+int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx,
+			 uint32_t ip_idx, uint32_t *sts);
+void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session);
 u16 rd_nvram_word(struct scsi_qla_host *ha, int offset);
+u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset);
 void qla4xxx_get_crash_record(struct scsi_qla_host *ha);
-struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
-int qla4xxx_add_sess(struct ddb_entry *);
-void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
 int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host *ha);
 int qla4xxx_about_firmware(struct scsi_qla_host *ha);
 void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
@@ -68,14 +72,13 @@ int qla4xxx_init_rings(struct scsi_qla_host *ha);
 void qla4xxx_srb_compl(struct kref *ref);
 struct srb *qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
					  uint32_t index);
-int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha);
 int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
				uint32_t state, uint32_t conn_error);
 void qla4xxx_dump_buffer(void *b, uint32_t size);
 int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
	struct ddb_entry *ddb_entry, int lun, uint16_t mrkr_mod);
-int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err);
-
+int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr,
+		      uint32_t offset, uint32_t length, uint32_t options);
 int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
			    uint8_t outCount, uint32_t *mbx_cmd,
			    uint32_t *mbx_sts);
@@ -95,6 +98,11 @@ void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
 void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
 void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
 void qla4xxx_dump_registers(struct scsi_qla_host *ha);
+uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
+				  uint32_t *mbox_cmd,
+				  uint32_t *mbox_sts,
+				  struct addr_ctrl_blk *init_fw_cb,
+				  dma_addr_t init_fw_cb_dma);
 
 void qla4_8xxx_pci_config(struct scsi_qla_host *);
 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
@@ -134,6 +142,37 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_need_qsnt_handler(struct scsi_qla_host *ha);
 void qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha);
 void qla4_8xxx_set_drv_active(struct scsi_qla_host *ha);
+int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index);
+int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha,
+			       struct ddb_entry *ddb_entry,
+			       struct iscsi_cls_conn *cls_conn,
+			       uint32_t *mbx_sts);
+int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha,
+			       struct ddb_entry *ddb_entry, int options);
+int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
+			  uint32_t *mbx_sts);
+int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
+int qla4xxx_send_passthru0(struct iscsi_task *task);
+int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
+			  uint16_t stats_size, dma_addr_t stats_dma);
+void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
+				       struct ddb_entry *ddb_entry);
+int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
+			    struct dev_db_entry *fw_ddb_entry,
+			    dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
+int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username,
+		     char *password, uint16_t idx);
+int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+		      uint32_t offset, uint32_t size);
+int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
+		      uint32_t offset, uint32_t size);
+int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
+				     uint32_t region, uint32_t field0,
+				     uint32_t field1);
+
+/* BSG Functions */
+int qla4xxx_bsg_request(struct bsg_job *bsg_job);
+int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
 
 extern int ql4xextended_error_logging;
 extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 42ed5db2d530..3075fbaef553 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -11,9 +11,6 @@
 #include "ql4_dbg.h"
 #include "ql4_inline.h"
 
-static struct ddb_entry *qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
-					   uint32_t fw_ddb_index);
-
 static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
 {
 	uint32_t value;
@@ -48,41 +45,15 @@ static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
  * @ha: pointer to host adapter structure.
  * @ddb_entry: pointer to device database entry
  *
- * This routine deallocates and unlinks the specified ddb_entry from the
- * adapter's
+ * This routine marks a DDB entry INVALID
 **/
 void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry)
 {
-	/* Remove device entry from list */
-	list_del_init(&ddb_entry->list);
-
 	/* Remove device pointer from index mapping arrays */
 	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
		(struct ddb_entry *) INVALID_ENTRY;
 	ha->tot_ddbs--;
-
-	/* Free memory and scsi-ml struct for device entry */
-	qla4xxx_destroy_sess(ddb_entry);
-}
-
-/**
- * qla4xxx_free_ddb_list - deallocate all ddbs
- * @ha: pointer to host adapter structure.
- *
- * This routine deallocates and removes all devices on the sppecified adapter.
- **/
-void qla4xxx_free_ddb_list(struct scsi_qla_host *ha)
-{
-	struct list_head *ptr;
-	struct ddb_entry *ddb_entry;
-
-	while (!list_empty(&ha->ddb_list)) {
-		ptr = ha->ddb_list.next;
-		/* Free memory for device entry and remove */
-		ddb_entry = list_entry(ptr, struct ddb_entry, list);
-		qla4xxx_free_ddb(ha, ddb_entry);
-	}
 }
 
 /**
@@ -236,38 +207,44 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
			    FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) {
			ipv4_wait = 1;
		}
-		if (((ha->ipv6_addl_options &
-		      IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
-		    ((ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) ||
-		     (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) ||
-		     (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) {
+		if (((ha->ip_config.ipv6_addl_options &
+		      IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) &&
+		    ((ha->ip_config.ipv6_link_local_state ==
+		      IP_ADDRSTATE_ACQUIRING) ||
+		     (ha->ip_config.ipv6_addr0_state ==
+		      IP_ADDRSTATE_ACQUIRING) ||
+		     (ha->ip_config.ipv6_addr1_state ==
+		      IP_ADDRSTATE_ACQUIRING))) {
			ipv6_wait = 1;
-			if ((ha->ipv6_link_local_state ==
-			     IP_ADDRSTATE_PREFERRED) ||
-			    (ha->ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) ||
-			    (ha->ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) {
+			if ((ha->ip_config.ipv6_link_local_state ==
+			     IP_ADDRSTATE_PREFERRED) ||
+			    (ha->ip_config.ipv6_addr0_state ==
+			     IP_ADDRSTATE_PREFERRED) ||
+			    (ha->ip_config.ipv6_addr1_state ==
+			     IP_ADDRSTATE_PREFERRED)) {
				DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
					      "Preferred IP configured."
					      " Don't wait!\n", ha->host_no,
					      __func__));
				ipv6_wait = 0;
			}
-			if (memcmp(&ha->ipv6_default_router_addr, ip_address,
-				IPv6_ADDR_LEN) == 0) {
+			if (memcmp(&ha->ip_config.ipv6_default_router_addr,
+				   ip_address, IPv6_ADDR_LEN) == 0) {
				DEBUG2(printk(KERN_INFO "scsi%ld: %s: "
					      "No Router configured. "
					      "Don't wait!\n", ha->host_no,
					      __func__));
				ipv6_wait = 0;
			}
-			if ((ha->ipv6_default_router_state ==
-			     IPV6_RTRSTATE_MANUAL) &&
-			    (ha->ipv6_link_local_state ==
-			     IP_ADDRSTATE_TENTATIVE) &&
-			    (memcmp(&ha->ipv6_link_local_addr,
-				    &ha->ipv6_default_router_addr, 4) == 0)) {
+			if ((ha->ip_config.ipv6_default_router_state ==
+			     IPV6_RTRSTATE_MANUAL) &&
+			    (ha->ip_config.ipv6_link_local_state ==
+			     IP_ADDRSTATE_TENTATIVE) &&
+			    (memcmp(&ha->ip_config.ipv6_link_local_addr,
+				    &ha->ip_config.ipv6_default_router_addr,
+				    4) == 0)) {
				DEBUG2(printk("scsi%ld: %s: LinkLocal Router & "
					      "IP configured. Don't wait!\n",
					      ha->host_no, __func__));
@@ -279,11 +256,14 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
			      "IP(s) \"", ha->host_no, __func__));
		if (ipv4_wait)
			DEBUG2(printk("IPv4 "));
-		if (ha->ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING)
+		if (ha->ip_config.ipv6_link_local_state ==
+		    IP_ADDRSTATE_ACQUIRING)
			DEBUG2(printk("IPv6LinkLocal "));
-		if (ha->ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING)
+		if (ha->ip_config.ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING)
			DEBUG2(printk("IPv6Addr0 "));
-		if (ha->ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING)
+		if (ha->ip_config.ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING)
			DEBUG2(printk("IPv6Addr1 "));
		DEBUG2(printk("\"\n"));
	}
@@ -466,486 +446,19 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
	return qla4xxx_get_firmware_status(ha);
 }
 
-static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
-					       uint32_t fw_ddb_index,
-					       uint32_t *new_tgt)
-{
-	struct dev_db_entry *fw_ddb_entry = NULL;
-	dma_addr_t fw_ddb_entry_dma;
-	struct ddb_entry *ddb_entry = NULL;
-	int found = 0;
-	uint32_t device_state;
-
-	*new_tgt = 0;
-	/* Make sure the dma buffer is valid */
-	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
-					  sizeof(*fw_ddb_entry),
-					  &fw_ddb_entry_dma, GFP_KERNEL);
-	if (fw_ddb_entry == NULL) {
-		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
-			      ha->host_no, __func__));
-		goto exit_get_ddb_entry_no_free;
-	}
-
-	if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
-				    fw_ddb_entry_dma, NULL, NULL,
-				    &device_state, NULL, NULL, NULL) ==
-	    QLA_ERROR) {
-		DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
-			      "fw_ddb_index %d\n", ha->host_no, __func__,
-			      fw_ddb_index));
-		goto exit_get_ddb_entry;
-	}
-
-	/* Allocate DDB if not already allocated. */
-	DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
-		      __func__, fw_ddb_index));
-	list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
-		if ((memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name,
-			    ISCSI_NAME_SIZE) == 0) &&
-		    (ddb_entry->tpgt ==
-		     le32_to_cpu(fw_ddb_entry->tgt_portal_grp)) &&
-		    (memcmp(ddb_entry->isid, fw_ddb_entry->isid,
-			    sizeof(ddb_entry->isid)) == 0)) {
-			found++;
-			break;
-		}
-	}
-
-	/* if not found allocate new ddb */
-	if (!found) {
-		DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
-			      "new ddb\n", ha->host_no, __func__,
-			      fw_ddb_index));
-		*new_tgt = 1;
-		ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
-	}
-
-exit_get_ddb_entry:
-	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
-			  fw_ddb_entry_dma);
-
-exit_get_ddb_entry_no_free:
-	return ddb_entry;
-}
-
-/**
- * qla4xxx_update_ddb_entry - update driver's internal ddb
- * @ha: pointer to host adapter structure.
- * @ddb_entry: pointer to device database structure to be filled
- * @fw_ddb_index: index of the ddb entry in fw ddb table
- *
- * This routine updates the driver's internal device database entry
- * with information retrieved from the firmware's device database
- * entry for the specified device. The ddb_entry->fw_ddb_index field
- * must be initialized prior to calling this routine
- *
- **/
-static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
-				    struct ddb_entry *ddb_entry,
-				    uint32_t fw_ddb_index)
-{
-	struct dev_db_entry *fw_ddb_entry = NULL;
-	dma_addr_t fw_ddb_entry_dma;
-	int status = QLA_ERROR;
-	uint32_t conn_err;
-
-	if (ddb_entry == NULL) {
-		DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no,
-			      __func__));
-
-		goto exit_update_ddb_no_free;
-	}
-
-	/* Make sure the dma buffer is valid */
-	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
-					  sizeof(*fw_ddb_entry),
-					  &fw_ddb_entry_dma, GFP_KERNEL);
-	if (fw_ddb_entry == NULL) {
-		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
-			      ha->host_no, __func__));
-
-		goto exit_update_ddb_no_free;
-	}
-
-	if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
-				    fw_ddb_entry_dma, NULL, NULL,
-				    &ddb_entry->fw_ddb_device_state, &conn_err,
-				    &ddb_entry->tcp_source_port_num,
-				    &ddb_entry->connection_id) ==
-	    QLA_ERROR) {
-		DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for "
-			      "fw_ddb_index %d\n", ha->host_no, __func__,
-			      fw_ddb_index));
-
-		goto exit_update_ddb;
-	}
-
-	status = QLA_SUCCESS;
-	ddb_entry->options = le16_to_cpu(fw_ddb_entry->options);
-	ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
-	ddb_entry->task_mgmt_timeout =
-		le16_to_cpu(fw_ddb_entry->def_timeout);
-	ddb_entry->CmdSn = 0;
-	ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exec_throttle);
-	ddb_entry->default_relogin_timeout =
-		le16_to_cpu(fw_ddb_entry->def_timeout);
-	ddb_entry->default_time2wait =
-		le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
-
-	/* Update index in case it changed */
-	ddb_entry->fw_ddb_index = fw_ddb_index;
-	ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
-
-	ddb_entry->port = le16_to_cpu(fw_ddb_entry->port);
-	ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
-	memcpy(ddb_entry->isid, fw_ddb_entry->isid, sizeof(ddb_entry->isid));
-
-	memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
-	       min(sizeof(ddb_entry->iscsi_name),
-		   sizeof(fw_ddb_entry->iscsi_name)));
-	memcpy(&ddb_entry->iscsi_alias[0], &fw_ddb_entry->iscsi_alias[0],
-	       min(sizeof(ddb_entry->iscsi_alias),
-		   sizeof(fw_ddb_entry->iscsi_alias)));
-	memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
-	       min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
-
-	ddb_entry->iscsi_max_burst_len = fw_ddb_entry->iscsi_max_burst_len;
-	ddb_entry->iscsi_max_outsnd_r2t = fw_ddb_entry->iscsi_max_outsnd_r2t;
-	ddb_entry->iscsi_first_burst_len = fw_ddb_entry->iscsi_first_burst_len;
-	ddb_entry->iscsi_max_rcv_data_seg_len =
-		fw_ddb_entry->iscsi_max_rcv_data_seg_len;
-	ddb_entry->iscsi_max_snd_data_seg_len =
-		fw_ddb_entry->iscsi_max_snd_data_seg_len;
-
-	if (ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
-		memcpy(&ddb_entry->remote_ipv6_addr,
-		       fw_ddb_entry->ip_addr,
-		       min(sizeof(ddb_entry->remote_ipv6_addr),
-			   sizeof(fw_ddb_entry->ip_addr)));
-		memcpy(&ddb_entry->link_local_ipv6_addr,
-		       fw_ddb_entry->link_local_ipv6_addr,
-		       min(sizeof(ddb_entry->link_local_ipv6_addr),
-			   sizeof(fw_ddb_entry->link_local_ipv6_addr)));
-
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB[%d] State %04x"
-				  " ConnErr %08x IP %pI6 "
-				  ":%04d \"%s\"\n",
-				  __func__, fw_ddb_index,
-				  ddb_entry->fw_ddb_device_state,
-				  conn_err, fw_ddb_entry->ip_addr,
-				  le16_to_cpu(fw_ddb_entry->port),
-				  fw_ddb_entry->iscsi_name));
-	} else
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB[%d] State %04x"
-				  " ConnErr %08x IP %pI4 "
-				  ":%04d \"%s\"\n",
-				  __func__, fw_ddb_index,
-				  ddb_entry->fw_ddb_device_state,
-				  conn_err, fw_ddb_entry->ip_addr,
-				  le16_to_cpu(fw_ddb_entry->port),
-				  fw_ddb_entry->iscsi_name));
-exit_update_ddb:
-	if (fw_ddb_entry)
-		dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-				  fw_ddb_entry, fw_ddb_entry_dma);
-
-exit_update_ddb_no_free:
-	return status;
-}
-
-/**
- * qla4xxx_alloc_ddb - allocate device database entry
- * @ha: Pointer to host adapter structure.
- * @fw_ddb_index: Firmware's device database index
- *
- * This routine allocates a ddb_entry, ititializes some values, and
- * inserts it into the ddb list.
- **/
-static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
-					    uint32_t fw_ddb_index)
+static void qla4xxx_set_model_info(struct scsi_qla_host *ha)
 {
-	struct ddb_entry *ddb_entry;
-
-	DEBUG2(printk("scsi%ld: %s: fw_ddb_index [%d]\n", ha->host_no,
-		      __func__, fw_ddb_index));
-
-	ddb_entry = qla4xxx_alloc_sess(ha);
-	if (ddb_entry == NULL) {
-		DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
-			      "to add fw_ddb_index [%d]\n",
-			      ha->host_no, __func__, fw_ddb_index));
-		return ddb_entry;
-	}
+	uint16_t board_id_string[8];
+	int i;
+	int size = sizeof(ha->nvram->isp4022.boardIdStr);
+	int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2;
 
-	ddb_entry->fw_ddb_index = fw_ddb_index;
-	atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
-	atomic_set(&ddb_entry->relogin_timer, 0);
-	atomic_set(&ddb_entry->relogin_retry_count, 0);
-	atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
-	list_add_tail(&ddb_entry->list, &ha->ddb_list);
-	ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
-	ha->tot_ddbs++;
-
-	return ddb_entry;
-}
-
-/**
- * qla4_is_relogin_allowed - Are we allowed to login?
- * @ha: Pointer to host adapter structure.
- * @conn_err: Last connection error associated with the ddb
- *
- * This routine tests the given connection error to determine if
- * we are allowed to login.
- **/
-int qla4_is_relogin_allowed(struct scsi_qla_host *ha, uint32_t conn_err)
-{
-	uint32_t err_code, login_rsp_sts_class;
-	int relogin = 1;
-
-	err_code = ((conn_err & 0x00ff0000) >> 16);
-	login_rsp_sts_class = ((conn_err & 0x0000ff00) >> 8);
-	if (err_code == 0x1c || err_code == 0x06) {
-		DEBUG2(ql4_printk(KERN_INFO, ha,
-				  ": conn_err=0x%08x, send target completed"
-				  " or access denied failure\n", conn_err));
-		relogin = 0;
-	}
-	if ((err_code == 0x08) && (login_rsp_sts_class == 0x02)) {
-		/* Login Response PDU returned an error.
-		   Login Response Status in Error Code Detail
-		   indicates login should not be retried.*/
-		DEBUG2(ql4_printk(KERN_INFO, ha,
-				  ": conn_err=0x%08x, do not retry relogin\n",
-				  conn_err));
-		relogin = 0;
+	for (i = 0; i < (size / 2) ; i++) {
+		board_id_string[i] = rd_nvram_word(ha, offset);
+		offset += 1;
	}
-	return relogin;
-}
-
-static void qla4xxx_flush_AENS(struct scsi_qla_host *ha)
-{
-	unsigned long wtime;
-
-	/* Flush the 0x8014 AEN from the firmware as a result of
-	 * Auto connect. We are basically doing get_firmware_ddb()
-	 * to determine whether we need to log back in or not.
-	 * Trying to do a set ddb before we have processed 0x8014
-	 * will result in another set_ddb() for the same ddb. In other
-	 * words there will be stale entries in the aen_q.
-	 */
-	wtime = jiffies + (2 * HZ);
-	do {
-		if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS)
-			if (ha->firmware_state & (BIT_2 | BIT_0))
-				return;
-
-		if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags))
-			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-
-		msleep(1000);
-	} while (!time_after_eq(jiffies, wtime));
-}
-
-/**
- * qla4xxx_build_ddb_list - builds driver ddb list
- * @ha: Pointer to host adapter structure.
- *
- * This routine searches for all valid firmware ddb entries and builds
- * an internal ddb list. Ddbs that are considered valid are those with
- * a device state of SESSION_ACTIVE.
- * A relogin (set_ddb) is issued for DDBs that are not online.
- **/
-static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
-{
-	int status = QLA_ERROR;
-	uint32_t fw_ddb_index = 0;
-	uint32_t next_fw_ddb_index = 0;
-	uint32_t ddb_state;
-	uint32_t conn_err;
-	struct ddb_entry *ddb_entry;
-	struct dev_db_entry *fw_ddb_entry = NULL;
-	dma_addr_t fw_ddb_entry_dma;
-	uint32_t ipv6_device;
-	uint32_t new_tgt;
-
-	qla4xxx_flush_AENS(ha);
-
-	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
-					  &fw_ddb_entry_dma, GFP_KERNEL);
-	if (fw_ddb_entry == NULL) {
-		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DMA alloc failed\n",
-				  __func__));
-
-		goto exit_build_ddb_list_no_free;
-	}
-
-	ql4_printk(KERN_INFO, ha, "Initializing DDBs ...\n");
-	for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
-	     fw_ddb_index = next_fw_ddb_index) {
-		/* First, let's see if a device exists here */
-		if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry,
-					    0, NULL, &next_fw_ddb_index,
-					    &ddb_state, &conn_err,
-					    NULL, NULL) ==
-		    QLA_ERROR) {
-			DEBUG2(printk("scsi%ld: %s: get_ddb_entry, "
-				      "fw_ddb_index %d failed", ha->host_no,
-				      __func__, fw_ddb_index));
-			goto exit_build_ddb_list;
-		}
-
-		DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, "
-			      "next_fw_ddb_index=%d.\n", ha->host_no, __func__,
-			      fw_ddb_index, ddb_state, next_fw_ddb_index));
-
-		/* Issue relogin, if necessary. */
-		if (ddb_state == DDB_DS_SESSION_FAILED ||
-		    ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) {
-			/* Try and login to device */
-			DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n",
-				      ha->host_no, __func__, fw_ddb_index));
-			ipv6_device = le16_to_cpu(fw_ddb_entry->options) &
-				      DDB_OPT_IPV6_DEVICE;
-			if (qla4_is_relogin_allowed(ha, conn_err) &&
-			    ((!ipv6_device &&
-			      *((uint32_t *)fw_ddb_entry->ip_addr))
-			     || ipv6_device)) {
-				qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
-				if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
-							    NULL, 0, NULL,
-							    &next_fw_ddb_index,
-							    &ddb_state,
-							    &conn_err,
-							    NULL, NULL)
-				    == QLA_ERROR) {
-					DEBUG2(printk("scsi%ld: %s:"
-						      "get_ddb_entry %d failed\n",
-						      ha->host_no,
-						      __func__, fw_ddb_index));
-					goto exit_build_ddb_list;
-				}
-			}
-		}
-
-		if (ddb_state != DDB_DS_SESSION_ACTIVE)
-			goto next_one;
-		/*
-		 * if fw_ddb with session active state found,
-		 * add to ddb_list
-		 */
-		DEBUG2(printk("scsi%ld: %s: DDB[%d] added to list\n",
-			      ha->host_no, __func__, fw_ddb_index));
-
-		/* Add DDB to internal our ddb list. */
-		ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
-		if (ddb_entry == NULL) {
-			DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
-				      "for device at fw_ddb_index %d\n",
-				      ha->host_no, __func__, fw_ddb_index));
-			goto exit_build_ddb_list;
-		}
-		/* Fill in the device structure */
-		if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
-		    QLA_ERROR) {
-			ha->fw_ddb_index_map[fw_ddb_index] =
-				(struct ddb_entry *)INVALID_ENTRY;
-
-			DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed "
-				      "for fw_ddb_index %d.\n",
-				      ha->host_no, __func__, fw_ddb_index));
-			goto exit_build_ddb_list;
-		}
-
-next_one:
-		/* We know we've reached the last device when
-		 * next_fw_ddb_index is 0 */
-		if (next_fw_ddb_index == 0)
-			break;
-	}
-
-	status = QLA_SUCCESS;
-	ql4_printk(KERN_INFO, ha, "DDB list done..\n");
-
-exit_build_ddb_list:
-	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry,
-			  fw_ddb_entry_dma);
-
-exit_build_ddb_list_no_free:
-	return status;
-}
-
-static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha)
-{
-	uint16_t fw_ddb_index;
-	int status = QLA_SUCCESS;
-
-	/* free the ddb list if is not empty */
-	if (!list_empty(&ha->ddb_list))
-		qla4xxx_free_ddb_list(ha);
-
-	for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++)
-		ha->fw_ddb_index_map[fw_ddb_index] =
-			(struct ddb_entry *)INVALID_ENTRY;
-
-	ha->tot_ddbs = 0;
-
-	/* Perform device discovery and build ddb list. */
-	status = qla4xxx_build_ddb_list(ha);
-
-	return status;
-}
-
-/**
- * qla4xxx_reinitialize_ddb_list - update the driver ddb list
- * @ha: pointer to host adapter structure.
- *
- * This routine obtains device information from the F/W database after
- * firmware or adapter resets.  The device table is preserved.
- **/
-int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha)
-{
-	int status = QLA_SUCCESS;
-	struct ddb_entry *ddb_entry, *detemp;
-
-	/* Update the device information for all devices. */
-	list_for_each_entry_safe(ddb_entry, detemp, &ha->ddb_list, list) {
-		qla4xxx_update_ddb_entry(ha, ddb_entry,
-					 ddb_entry->fw_ddb_index);
-		if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) {
-			atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
-			DEBUG2(printk("scsi%ld: %s: ddb index [%d] marked "
-				      "ONLINE\n", ha->host_no, __func__,
-				      ddb_entry->fw_ddb_index));
-			iscsi_unblock_session(ddb_entry->sess);
-		} else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
-			qla4xxx_mark_device_missing(ha, ddb_entry);
-	}
-	return status;
-}
-
-/**
- * qla4xxx_relogin_device - re-establish session
- * @ha: Pointer to host adapter structure.
- * @ddb_entry: Pointer to device database entry
- *
- * This routine does a session relogin with the specified device.
- * The ddb entry must be assigned prior to making this call.
- **/
-int qla4xxx_relogin_device(struct scsi_qla_host *ha,
-			   struct ddb_entry * ddb_entry)
-{
-	uint16_t relogin_timer;
-
-	relogin_timer = max(ddb_entry->default_relogin_timeout,
-			    (uint16_t)RELOGIN_TOV);
-	atomic_set(&ddb_entry->relogin_timer, relogin_timer);
-
-	DEBUG2(printk("scsi%ld: Relogin ddb [%d]. TOV=%d\n", ha->host_no,
-		      ddb_entry->fw_ddb_index, relogin_timer));
-
-	qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0);
-
-	return QLA_SUCCESS;
+	memcpy(ha->model_name, board_id_string, size);
 }
 
 static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
@@ -983,6 +496,12 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
		else
			return QLA_ERROR;
	}
+
+	if (is_qla4022(ha) || is_qla4032(ha))
+		qla4xxx_set_model_info(ha);
+	else
+		strcpy(ha->model_name, "QLA4010");
+
	DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n",
		     ha->host_no, __func__, extHwConfig.Asuint32_t));
 
@@ -1246,23 +765,56 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
	}
	return status;
 }
+/**
+ * qla4xxx_free_ddb_index - Free DDBs reserved by firmware
+ * @ha: pointer to adapter structure
+ *
+ * Since firmware is not running in autoconnect mode the DDB indices should
+ * be freed so that when login happens from user space there are free DDB
+ * indices available.
+ **/
+static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
+{
+	int max_ddbs;
+	int ret;
+	uint32_t idx = 0, next_idx = 0;
+	uint32_t state = 0, conn_err = 0;
+
+	max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
+				    MAX_DEV_DB_ENTRIES;
+
+	for (idx = 0; idx < max_ddbs; idx = next_idx) {
+		ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
+					      &next_idx, &state, &conn_err,
+					      NULL, NULL);
+		if (ret == QLA_ERROR)
+			continue;
+		if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+		    state == DDB_DS_SESSION_FAILED) {
+			DEBUG2(ql4_printk(KERN_INFO, ha,
+					  "Freeing DDB index = 0x%x\n", idx));
+			ret = qla4xxx_clear_ddb_entry(ha, idx);
+			if (ret == QLA_ERROR)
+				ql4_printk(KERN_ERR, ha,
+					   "Unable to clear DDB index = "
+					   "0x%x\n", idx);
+		}
+		if (next_idx == 0)
+			break;
+	}
+}
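With ADFWOPT_AUTOCONN_DISABLE set, firmware no longer logs in by itself, so a DDB index becomes a resource the driver hands out when user space initiates a login; the ddb_idx_map bitmap added to scsi_qla_host tracks which indices are taken. A sketch of claiming one and reserving it in firmware via qla4xxx_req_ddb_entry(), with locking omitted:

	/* Sketch: allocate a free firmware DDB index for a new session. */
	uint32_t mbx_sts;
	int idx = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES);

	if (idx >= MAX_DDB_ENTRIES)
		return -ENOSPC;
	set_bit(idx, ha->ddb_idx_map);

	if (qla4xxx_req_ddb_entry(ha, idx, &mbx_sts) == QLA_ERROR) {
		clear_bit(idx, ha->ddb_idx_map);	/* roll back */
		return -EBUSY;
	}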
 
 /**
  * qla4xxx_initialize_adapter - initializes hba
  * @ha:  Pointer to host adapter structure.
- * @renew_ddb_list: Indicates what to do with the adapter's ddb list
- *			after adapter recovery has completed.
- *			0=preserve ddb list, 1=destroy and rebuild ddb list
  *
  * This routine performs all of the steps necessary to initialize the adapter.
  *
 **/
-int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
-			       uint8_t renew_ddb_list)
+int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
 {
	int status = QLA_ERROR;
-	int8_t ip_address[IP_ADDR_LEN] = {0} ;
 
	ha->eeprom_cmd_data = 0;
@@ -1288,47 +840,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
	if (status == QLA_ERROR)
		goto exit_init_hba;
 
-	/*
-	 * FW is waiting to get an IP address from DHCP server: Skip building
-	 * the ddb_list and wait for DHCP lease acquired aen to come in
-	 * followed by 0x8014 aen" to trigger the tgt discovery process.
-	 */
-	if (ha->firmware_state & FW_STATE_CONFIGURING_IP)
-		goto exit_init_online;
-
-	/* Skip device discovery if ip and subnet is zero */
-	if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
-	    memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0)
-		goto exit_init_online;
+	qla4xxx_free_ddb_index(ha);
 
-	if (renew_ddb_list == PRESERVE_DDB_LIST) {
-		/*
-		 * We want to preserve lun states (i.e. suspended, etc.)
-		 * for recovery initiated by the driver. So just update
-		 * the device states for the existing ddb_list.
-		 */
-		qla4xxx_reinitialize_ddb_list(ha);
-	} else if (renew_ddb_list == REBUILD_DDB_LIST) {
-		/*
-		 * We want to build the ddb_list from scratch during
-		 * driver initialization and recovery initiated by the
-		 * INT_HBA_RESET IOCTL.
-		 */
-		status = qla4xxx_initialize_ddb_list(ha);
-		if (status == QLA_ERROR) {
-			DEBUG2(printk("%s(%ld) Error occurred during build"
-				      "ddb list\n", __func__, ha->host_no));
-			goto exit_init_hba;
-		}
-
-	}
-	if (!ha->tot_ddbs) {
-		DEBUG2(printk("scsi%ld: Failed to initialize devices or none "
-			      "present in Firmware device database\n",
-			      ha->host_no));
-	}
-
-exit_init_online:
	set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
	if (is_qla8022(ha) && (status == QLA_ERROR)) {
@@ -1343,61 +856,6 @@ exit_init_hba:
 }
 
 /**
- * qla4xxx_add_device_dynamically - ddb addition due to an AEN
- * @ha:  Pointer to host adapter structure.
- * @fw_ddb_index:  Firmware's device database index
- *
- * This routine processes adds a device as a result of an 8014h AEN.
- **/
-static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
-					   uint32_t fw_ddb_index)
-{
-	struct ddb_entry * ddb_entry;
-	uint32_t new_tgt;
-
-	/* First allocate a device structure */
-	ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
-	if (ddb_entry == NULL) {
-		DEBUG2(printk(KERN_WARNING
-			      "scsi%ld: Unable to allocate memory to add "
-			      "fw_ddb_index %d\n", ha->host_no, fw_ddb_index));
-		return;
-	}
-
-	if (!new_tgt && (ddb_entry->fw_ddb_index != fw_ddb_index)) {
-		/* Target has been bound to a new fw_ddb_index */
-		qla4xxx_free_ddb(ha, ddb_entry);
-		ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
-		if (ddb_entry == NULL) {
-			DEBUG2(printk(KERN_WARNING
-				      "scsi%ld: Unable to allocate memory"
-				      " to add fw_ddb_index %d\n",
-				      ha->host_no, fw_ddb_index));
-			return;
-		}
-	}
-	if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
-	    QLA_ERROR) {
-		ha->fw_ddb_index_map[fw_ddb_index] =
-			(struct ddb_entry *)INVALID_ENTRY;
-		DEBUG2(printk(KERN_WARNING
-			      "scsi%ld: failed to add new device at index "
-			      "[%d]\n Unable to retrieve fw ddb entry\n",
-			      ha->host_no, fw_ddb_index));
-		qla4xxx_free_ddb(ha, ddb_entry);
-		return;
-	}
-
-	if (qla4xxx_add_sess(ddb_entry)) {
-		DEBUG2(printk(KERN_WARNING
-			      "scsi%ld: failed to add new device at index "
-			      "[%d]\n Unable to add connection and session\n",
-			      ha->host_no, fw_ddb_index));
-		qla4xxx_free_ddb(ha, ddb_entry);
-	}
-}
-
-/**
  * qla4xxx_process_ddb_changed - process ddb state change
  * @ha - Pointer to host adapter structure.
  * @fw_ddb_index - Firmware's device database index
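The hunk below replaces the old relogin heuristics with an explicit transition table keyed on the previous firmware DDB state. Summarizing the added switch:

	old state          new state                  action
	LOGIN_IN_PROCESS   SESSION_ACTIVE/DISCOVERY   start conn, report LOGGED_IN, update session params
	LOGIN_IN_PROCESS   SESSION_FAILED/NO_CONN     report ISCSI_CONN_STATE_FREE
	SESSION_ACTIVE     SESSION_FAILED             iscsi_session_failure(ISCSI_ERR_CONN_FAILED)
	SESSION_ACTIVE     NO_CONNECTION_ACTIVE       clear the ddb_idx_map bit
	SESSION_FAILED     SESSION_ACTIVE/DISCOVERY   start conn, report LOGGED_IN, update session params
	SESSION_FAILED     SESSION_FAILED             iscsi_session_failure(ISCSI_ERR_CONN_FAILED)
	anything else      (any)                      logged as an unknown event

Relogin itself is now driven from user space: the session-failure event causes the iSCSI daemon to tear the connection down and re-establish it.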
*/ - DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " - "index [%d]\n", ha->host_no, __func__, - ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); + old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: DDB - old state = 0x%x, new state = 0x%x for " + "index [%d]\n", __func__, + ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); ddb_entry->fw_ddb_device_state = state; - /* Device is back online. */ - if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) && - (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) { - atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); - atomic_set(&ddb_entry->relogin_retry_count, 0); - atomic_set(&ddb_entry->relogin_timer, 0); - clear_bit(DF_RELOGIN, &ddb_entry->flags); - iscsi_unblock_session(ddb_entry->sess); - iscsi_session_event(ddb_entry->sess, - ISCSI_KEVENT_CREATE_SESSION); - /* - * Change the lun state to READY in case the lun TIMEOUT before - * the device came back. - */ - } else if (ddb_entry->fw_ddb_device_state != DDB_DS_SESSION_ACTIVE) { - /* Device went away, mark device missing */ - if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) { - DEBUG2(ql4_printk(KERN_INFO, ha, "%s mark missing " - "ddb_entry 0x%p sess 0x%p conn 0x%p\n", - __func__, ddb_entry, - ddb_entry->sess, ddb_entry->conn)); - qla4xxx_mark_device_missing(ha, ddb_entry); - } - /* - * Relogin if device state changed to a not active state. - * However, do not relogin if a RELOGIN is in process, or - * we are not allowed to relogin to this DDB. - */ - if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && - !test_bit(DF_RELOGIN, &ddb_entry->flags) && - qla4_is_relogin_allowed(ha, conn_err)) { + switch (old_fw_ddb_device_state) { + case DDB_DS_LOGIN_IN_PROCESS: + switch (state) { + case DDB_DS_SESSION_ACTIVE: + case DDB_DS_DISCOVERY: + iscsi_conn_start(ddb_entry->conn); + iscsi_conn_login_event(ddb_entry->conn, + ISCSI_CONN_STATE_LOGGED_IN); + qla4xxx_update_session_conn_param(ha, ddb_entry); + status = QLA_SUCCESS; + break; + case DDB_DS_SESSION_FAILED: + case DDB_DS_NO_CONNECTION_ACTIVE: + iscsi_conn_login_event(ddb_entry->conn, + ISCSI_CONN_STATE_FREE); + status = QLA_SUCCESS; + break; + } + break; + case DDB_DS_SESSION_ACTIVE: + switch (state) { + case DDB_DS_SESSION_FAILED: /* - * This triggers a relogin. After the relogin_timer - * expires, the relogin gets scheduled. We must wait a - * minimum amount of time since receiving an 0x8014 AEN - * with failed device_state or a logout response before - * we can issue another relogin. + * iscsi_session failure will cause userspace to + * stop the connection which in turn would block the + * iscsi_session and start relogin */ - /* Firmware pads this timeout: (time2wait +1). - * Driver retry to login should be longer than F/W. - * Otherwise F/W will fail - * set_ddb() mbx cmd with 0x4005 since it still - * counting down its time2wait. 
- */ - atomic_set(&ddb_entry->relogin_timer, 0); - atomic_set(&ddb_entry->retry_relogin_timer, - ddb_entry->default_time2wait + 4); - DEBUG(printk("scsi%ld: %s: ddb[%d] " - "initiate relogin after %d seconds\n", - ha->host_no, __func__, - ddb_entry->fw_ddb_index, - ddb_entry->default_time2wait + 4)); - } else { - DEBUG(printk("scsi%ld: %s: ddb[%d] " - "relogin not initiated, state = %d, " - "ddb_entry->flags = 0x%lx\n", - ha->host_no, __func__, - ddb_entry->fw_ddb_index, - ddb_entry->fw_ddb_device_state, - ddb_entry->flags)); + iscsi_session_failure(ddb_entry->sess->dd_data, + ISCSI_ERR_CONN_FAILED); + status = QLA_SUCCESS; + break; + case DDB_DS_NO_CONNECTION_ACTIVE: + clear_bit(fw_ddb_index, ha->ddb_idx_map); + status = QLA_SUCCESS; + break; + } + break; + case DDB_DS_SESSION_FAILED: + switch (state) { + case DDB_DS_SESSION_ACTIVE: + case DDB_DS_DISCOVERY: + iscsi_conn_start(ddb_entry->conn); + iscsi_conn_login_event(ddb_entry->conn, + ISCSI_CONN_STATE_LOGGED_IN); + qla4xxx_update_session_conn_param(ha, ddb_entry); + status = QLA_SUCCESS; + break; + case DDB_DS_SESSION_FAILED: + iscsi_session_failure(ddb_entry->sess->dd_data, + ISCSI_ERR_CONN_FAILED); + status = QLA_SUCCESS; + break; } + break; + default: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n", + __func__)); + break; } - return QLA_SUCCESS; + +exit_ddb_event: + return status; } diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c index 75fcd82a8fca..410669351906 100644 --- a/drivers/scsi/qla4xxx/ql4_iocb.c +++ b/drivers/scsi/qla4xxx/ql4_iocb.c @@ -313,10 +313,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb) cmd_entry->hdr.entryType = ET_COMMAND; cmd_entry->handle = cpu_to_le32(index); cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index); - cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id); int_to_scsilun(cmd->device->lun, &cmd_entry->lun); - cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn); cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd)); memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len); cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds); @@ -381,3 +379,69 @@ queuing_error: return QLA_ERROR; } +int qla4xxx_send_passthru0(struct iscsi_task *task) +{ + struct passthru0 *passthru_iocb; + struct iscsi_session *sess = task->conn->session; + struct ddb_entry *ddb_entry = sess->dd_data; + struct scsi_qla_host *ha = ddb_entry->ha; + struct ql4_task_data *task_data = task->dd_data; + uint16_t ctrl_flags = 0; + unsigned long flags; + int ret = QLA_ERROR; + + spin_lock_irqsave(&ha->hardware_lock, flags); + task_data->iocb_req_cnt = 1; + /* Put the IOCB on the request queue */ + if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt)) + goto queuing_error; + + passthru_iocb = (struct passthru0 *) ha->request_ptr; + + memset(passthru_iocb, 0, sizeof(struct passthru0)); + passthru_iocb->hdr.entryType = ET_PASSTHRU0; + passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU; + passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt; + passthru_iocb->handle = task->itt; + passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index); + passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT); + + /* Setup the out & in DSDs */ + if (task_data->req_len) { + memcpy((uint8_t *)task_data->req_buffer + + sizeof(struct iscsi_hdr), task->data, task->data_count); + ctrl_flags |= PT_FLAG_SEND_BUFFER; + passthru_iocb->out_dsd.base.addrLow = + cpu_to_le32(LSDW(task_data->req_dma)); + passthru_iocb->out_dsd.base.addrHigh = + cpu_to_le32(MSDW(task_data->req_dma)); + 
passthru_iocb->out_dsd.count = + cpu_to_le32(task->data_count + + sizeof(struct iscsi_hdr)); + } + if (task_data->resp_len) { + passthru_iocb->in_dsd.base.addrLow = + cpu_to_le32(LSDW(task_data->resp_dma)); + passthru_iocb->in_dsd.base.addrHigh = + cpu_to_le32(MSDW(task_data->resp_dma)); + passthru_iocb->in_dsd.count = + cpu_to_le32(task_data->resp_len); + } + + ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE); + passthru_iocb->control_flags = cpu_to_le16(ctrl_flags); + + /* Update the request pointer */ + qla4xxx_advance_req_ring_ptr(ha); + wmb(); + + /* Track IOCB used */ + ha->iocb_cnt += task_data->iocb_req_cnt; + ha->req_q_count -= task_data->iocb_req_cnt; + ha->isp_ops->queue_iocb(ha); + ret = QLA_SUCCESS; + +queuing_error: + spin_unlock_irqrestore(&ha->hardware_lock, flags); + return ret; +} diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c index 0e72921c752d..827e93078b94 100644 --- a/drivers/scsi/qla4xxx/ql4_isr.c +++ b/drivers/scsi/qla4xxx/ql4_isr.c @@ -224,8 +224,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, * I/O to this device. We should get a ddb state change * AEN soon. */ - if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) - qla4xxx_mark_device_missing(ha, ddb_entry); + if (iscsi_is_session_online(ddb_entry->sess)) + qla4xxx_mark_device_missing(ddb_entry->sess); break; case SCS_DATA_UNDERRUN: @@ -306,8 +306,8 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha, * send I/O to this device. We should get a ddb * state change AEN soon. */ - if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) - qla4xxx_mark_device_missing(ha, ddb_entry); + if (iscsi_is_session_online(ddb_entry->sess)) + qla4xxx_mark_device_missing(ddb_entry->sess); cmd->result = DID_TRANSPORT_DISRUPTED << 16; break; @@ -341,6 +341,51 @@ status_entry_exit: } /** + * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C) + * @ha: Pointer to host adapter structure. + * @sts_entry: Pointer to status entry structure. + **/ +static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha, + struct passthru_status *sts_entry) +{ + struct iscsi_task *task; + struct ddb_entry *ddb_entry; + struct ql4_task_data *task_data; + struct iscsi_cls_conn *cls_conn; + struct iscsi_conn *conn; + itt_t itt; + uint32_t fw_ddb_index; + + itt = sts_entry->handle; + fw_ddb_index = le32_to_cpu(sts_entry->target); + + ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); + + if (ddb_entry == NULL) { + ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n", + __func__, sts_entry->target); + return; + } + + cls_conn = ddb_entry->conn; + conn = cls_conn->dd_data; + spin_lock(&conn->session->lock); + task = iscsi_itt_to_task(conn, itt); + spin_unlock(&conn->session->lock); + + if (task == NULL) { + ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__); + return; + } + + task_data = task->dd_data; + memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status)); + ha->req_q_count += task_data->iocb_req_cnt; + ha->iocb_cnt -= task_data->iocb_req_cnt; + queue_work(ha->task_wq, &task_data->task_work); +} + +/** * qla4xxx_process_response_queue - process response queue completions * @ha: Pointer to host adapter structure. 
* @@ -375,6 +420,14 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha) break; case ET_PASSTHRU_STATUS: + if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU) + qla4xxx_passthru_status_entry(ha, + (struct passthru_status *)sts_entry); + else + ql4_printk(KERN_ERR, ha, + "%s: Invalid status received\n", + __func__); + break; case ET_STATUS_CONTINUATION: @@ -566,6 +619,8 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha, else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) && (mbox_sts[2] == ACB_STATE_VALID)) set_bit(DPC_RESET_HA, &ha->dpc_flags); + else if ((mbox_sts[3] == ACB_STATE_UNCONFIGURED)) + complete(&ha->disable_acb_comp); break; case MBOX_ASTS_MAC_ADDRESS_CHANGED: @@ -1009,23 +1064,23 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen) switch (mbox_sts[0]) { case MBOX_ASTS_DATABASE_CHANGED: - if (process_aen == FLUSH_DDB_CHANGED_AENS) { + switch (process_aen) { + case FLUSH_DDB_CHANGED_AENS: DEBUG2(printk("scsi%ld: AEN[%d] %04x, index " "[%d] state=%04x FLUSHED!\n", ha->host_no, ha->aen_out, mbox_sts[0], mbox_sts[2], mbox_sts[3])); break; + case PROCESS_ALL_AENS: + default: + /* Specific device. */ + if (mbox_sts[1] == 1) + qla4xxx_process_ddb_changed(ha, + mbox_sts[2], mbox_sts[3], + mbox_sts[4]); + break; } - case PROCESS_ALL_AENS: - default: - if (mbox_sts[1] == 0) { /* Global DB change. */ - qla4xxx_reinitialize_ddb_list(ha); - } else if (mbox_sts[1] == 1) { /* Specific device. */ - qla4xxx_process_ddb_changed(ha, mbox_sts[2], - mbox_sts[3], mbox_sts[4]); - } - break; } spin_lock_irqsave(&ha->hardware_lock, flags); } diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c index fce8289e9752..4c2b84870392 100644 --- a/drivers/scsi/qla4xxx/ql4_mbx.c +++ b/drivers/scsi/qla4xxx/ql4_mbx.c @@ -303,7 +303,7 @@ qla4xxx_set_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, return QLA_SUCCESS; } -static uint8_t +uint8_t qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, dma_addr_t init_fw_cb_dma) { @@ -327,43 +327,69 @@ qla4xxx_get_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, static void qla4xxx_update_local_ip(struct scsi_qla_host *ha, - struct addr_ctrl_blk *init_fw_cb) + struct addr_ctrl_blk *init_fw_cb) { + ha->ip_config.tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts); + ha->ip_config.ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts); + ha->ip_config.ipv4_addr_state = + le16_to_cpu(init_fw_cb->ipv4_addr_state); + ha->ip_config.eth_mtu_size = + le16_to_cpu(init_fw_cb->eth_mtu_size); + ha->ip_config.ipv4_port = le16_to_cpu(init_fw_cb->ipv4_port); + + if (ha->acb_version == ACB_SUPPORTED) { + ha->ip_config.ipv6_options = le16_to_cpu(init_fw_cb->ipv6_opts); + ha->ip_config.ipv6_addl_options = + le16_to_cpu(init_fw_cb->ipv6_addtl_opts); + } + /* Save IPv4 Address Info */ - memcpy(ha->ip_address, init_fw_cb->ipv4_addr, - min(sizeof(ha->ip_address), sizeof(init_fw_cb->ipv4_addr))); - memcpy(ha->subnet_mask, init_fw_cb->ipv4_subnet, - min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->ipv4_subnet))); - memcpy(ha->gateway, init_fw_cb->ipv4_gw_addr, - min(sizeof(ha->gateway), sizeof(init_fw_cb->ipv4_gw_addr))); + memcpy(ha->ip_config.ip_address, init_fw_cb->ipv4_addr, + min(sizeof(ha->ip_config.ip_address), + sizeof(init_fw_cb->ipv4_addr))); + memcpy(ha->ip_config.subnet_mask, init_fw_cb->ipv4_subnet, + min(sizeof(ha->ip_config.subnet_mask), + sizeof(init_fw_cb->ipv4_subnet))); + memcpy(ha->ip_config.gateway, init_fw_cb->ipv4_gw_addr, + min(sizeof(ha->ip_config.gateway), + 
sizeof(init_fw_cb->ipv4_gw_addr))); + + ha->ip_config.ipv4_vlan_tag = be16_to_cpu(init_fw_cb->ipv4_vlan_tag); if (is_ipv6_enabled(ha)) { /* Save IPv6 Address */ - ha->ipv6_link_local_state = init_fw_cb->ipv6_lnk_lcl_addr_state; - ha->ipv6_addr0_state = init_fw_cb->ipv6_addr0_state; - ha->ipv6_addr1_state = init_fw_cb->ipv6_addr1_state; - ha->ipv6_default_router_state = init_fw_cb->ipv6_dflt_rtr_state; - ha->ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE; - ha->ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80; - - memcpy(&ha->ipv6_link_local_addr.in6_u.u6_addr8[8], - init_fw_cb->ipv6_if_id, - min(sizeof(ha->ipv6_link_local_addr)/2, - sizeof(init_fw_cb->ipv6_if_id))); - memcpy(&ha->ipv6_addr0, init_fw_cb->ipv6_addr0, - min(sizeof(ha->ipv6_addr0), - sizeof(init_fw_cb->ipv6_addr0))); - memcpy(&ha->ipv6_addr1, init_fw_cb->ipv6_addr1, - min(sizeof(ha->ipv6_addr1), - sizeof(init_fw_cb->ipv6_addr1))); - memcpy(&ha->ipv6_default_router_addr, - init_fw_cb->ipv6_dflt_rtr_addr, - min(sizeof(ha->ipv6_default_router_addr), - sizeof(init_fw_cb->ipv6_dflt_rtr_addr))); + ha->ip_config.ipv6_link_local_state = + le16_to_cpu(init_fw_cb->ipv6_lnk_lcl_addr_state); + ha->ip_config.ipv6_addr0_state = + le16_to_cpu(init_fw_cb->ipv6_addr0_state); + ha->ip_config.ipv6_addr1_state = + le16_to_cpu(init_fw_cb->ipv6_addr1_state); + ha->ip_config.ipv6_default_router_state = + le16_to_cpu(init_fw_cb->ipv6_dflt_rtr_state); + ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[0] = 0xFE; + ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[1] = 0x80; + + memcpy(&ha->ip_config.ipv6_link_local_addr.in6_u.u6_addr8[8], + init_fw_cb->ipv6_if_id, + min(sizeof(ha->ip_config.ipv6_link_local_addr)/2, + sizeof(init_fw_cb->ipv6_if_id))); + memcpy(&ha->ip_config.ipv6_addr0, init_fw_cb->ipv6_addr0, + min(sizeof(ha->ip_config.ipv6_addr0), + sizeof(init_fw_cb->ipv6_addr0))); + memcpy(&ha->ip_config.ipv6_addr1, init_fw_cb->ipv6_addr1, + min(sizeof(ha->ip_config.ipv6_addr1), + sizeof(init_fw_cb->ipv6_addr1))); + memcpy(&ha->ip_config.ipv6_default_router_addr, + init_fw_cb->ipv6_dflt_rtr_addr, + min(sizeof(ha->ip_config.ipv6_default_router_addr), + sizeof(init_fw_cb->ipv6_dflt_rtr_addr))); + ha->ip_config.ipv6_vlan_tag = + be16_to_cpu(init_fw_cb->ipv6_vlan_tag); + ha->ip_config.ipv6_port = le16_to_cpu(init_fw_cb->ipv6_port); } } -static uint8_t +uint8_t qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, uint32_t *mbox_sts, @@ -383,9 +409,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, /* Save some info in adapter structure. 
*/ ha->acb_version = init_fw_cb->acb_version; ha->firmware_options = le16_to_cpu(init_fw_cb->fw_options); - ha->tcp_options = le16_to_cpu(init_fw_cb->ipv4_tcp_opts); - ha->ipv4_options = le16_to_cpu(init_fw_cb->ipv4_ip_opts); - ha->ipv4_addr_state = le16_to_cpu(init_fw_cb->ipv4_addr_state); ha->heartbeat_interval = init_fw_cb->hb_interval; memcpy(ha->name_string, init_fw_cb->iscsi_name, min(sizeof(ha->name_string), @@ -393,10 +416,6 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha, /*memcpy(ha->alias, init_fw_cb->Alias, min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/ - if (ha->acb_version == ACB_SUPPORTED) { - ha->ipv6_options = init_fw_cb->ipv6_opts; - ha->ipv6_addl_options = init_fw_cb->ipv6_addtl_opts; - } qla4xxx_update_local_ip(ha, init_fw_cb); return QLA_SUCCESS; @@ -462,10 +481,11 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha) init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); - /* Set bit for "serialize task mgmt" all other bits need to be zero */ init_fw_cb->add_fw_options = 0; init_fw_cb->add_fw_options |= - __constant_cpu_to_le16(SERIALIZE_TASK_MGMT); + __constant_cpu_to_le16(ADFWOPT_SERIALIZE_TASK_MGMT); + init_fw_cb->add_fw_options |= + __constant_cpu_to_le16(ADFWOPT_AUTOCONN_DISABLE); if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma) != QLA_SUCCESS) { @@ -691,19 +711,38 @@ exit_get_fwddb: return status; } +int qla4xxx_conn_open(struct scsi_qla_host *ha, uint16_t fw_ddb_index) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_CONN_OPEN; + mbox_cmd[1] = fw_ddb_index; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], + &mbox_sts[0]); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: status = %d mbx0 = 0x%x mbx1 = 0x%x\n", + __func__, status, mbox_sts[0], mbox_sts[1])); + return status; +} + /** * qla4xxx_set_fwddb_entry - sets a ddb entry. * @ha: Pointer to host adapter structure. * @fw_ddb_index: Firmware's device database index - * @fw_ddb_entry: Pointer to firmware's ddb entry structure, or NULL. + * @fw_ddb_entry_dma: dma address of ddb entry + * @mbx_sts: mailbox 0 to be returned or NULL * * This routine initializes or updates the adapter's device database - * entry for the specified device. It also triggers a login for the - * specified device. Therefore, it may also be used as a secondary - * login routine when a NULL pointer is specified for the fw_ddb_entry. + * entry for the specified device. 
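+ *
+ * Illustrative usage (editor's sketch, not part of the original patch):
+ * a caller typically requests the index with qla4xxx_req_ddb_entry(),
+ * fills a DMA-mapped struct dev_db_entry, and then issues the set, e.g.:
+ *
+ *	uint32_t mbx_sts;
+ *	if (qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts) == QLA_SUCCESS)
+ *		qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma,
+ *				      &mbx_sts);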
**/ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index, - dma_addr_t fw_ddb_entry_dma) + dma_addr_t fw_ddb_entry_dma, uint32_t *mbx_sts) { uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; @@ -722,13 +761,41 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index, mbox_cmd[4] = sizeof(struct dev_db_entry); status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], - &mbox_sts[0]); + &mbox_sts[0]); + if (mbx_sts) + *mbx_sts = mbox_sts[0]; DEBUG2(printk("scsi%ld: %s: status=%d mbx0=0x%x mbx4=0x%x\n", ha->host_no, __func__, status, mbox_sts[0], mbox_sts[4]);) return status; } +int qla4xxx_session_logout_ddb(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, int options) +{ + int status; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT; + mbox_cmd[1] = ddb_entry->fw_ddb_index; + mbox_cmd[3] = options; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT " + "failed sts %04X %04X", __func__, + mbox_sts[0], mbox_sts[1])); + } + + return status; +} + /** * qla4xxx_get_crash_record - retrieves crash record. * @ha: Pointer to host adapter structure. @@ -805,7 +872,6 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha) uint32_t max_event_log_entries; uint8_t i; - memset(&mbox_cmd, 0, sizeof(mbox_cmd)); memset(&mbox_sts, 0, sizeof(mbox_cmd)); @@ -1104,7 +1170,7 @@ exit_about_fw: return status; } -static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, +static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, uint32_t options, dma_addr_t dma_addr) { uint32_t mbox_cmd[MBOX_REG_COUNT]; @@ -1114,6 +1180,7 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS; + mbox_cmd[1] = options; mbox_cmd[2] = LSDW(dma_addr); mbox_cmd[3] = MSDW(dma_addr); @@ -1126,8 +1193,10 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, return QLA_SUCCESS; } -static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index) +int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index, + uint32_t *mbx_sts) { + int status; uint32_t mbox_cmd[MBOX_REG_COUNT]; uint32_t mbox_sts[MBOX_REG_COUNT]; @@ -1135,75 +1204,646 @@ static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index) memset(&mbox_sts, 0, sizeof(mbox_sts)); mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY; - mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES; + mbox_cmd[1] = ddb_index; - if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) != - QLA_SUCCESS) { - if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) { - *ddb_index = mbox_sts[2]; + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", + __func__, mbox_sts[0])); + } + + *mbx_sts = mbox_sts[0]; + return status; +} + +int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t ddb_index) +{ + int status; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY; + mbox_cmd[1] = 
ddb_index; + + status = qla4xxx_mailbox_command(ha, 2, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: failed status %04X\n", + __func__, mbox_sts[0])); + } + + return status; +} + +int qla4xxx_set_flash(struct scsi_qla_host *ha, dma_addr_t dma_addr, + uint32_t offset, uint32_t length, uint32_t options) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_WRITE_FLASH; + mbox_cmd[1] = LSDW(dma_addr); + mbox_cmd[2] = MSDW(dma_addr); + mbox_cmd[3] = offset; + mbox_cmd[4] = length; + mbox_cmd[5] = options; + + status = qla4xxx_mailbox_command(ha, 6, 2, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_WRITE_FLASH " + "failed w/ status %04X, mbx1 %04X\n", + __func__, mbox_sts[0], mbox_sts[1])); + } + return status; +} + +int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha, + struct dev_db_entry *fw_ddb_entry, + dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index) +{ + uint32_t dev_db_start_offset = FLASH_OFFSET_DB_INFO; + uint32_t dev_db_end_offset; + int status = QLA_ERROR; + + memset(fw_ddb_entry, 0, sizeof(*fw_ddb_entry)); + + dev_db_start_offset += (ddb_index * sizeof(*fw_ddb_entry)); + dev_db_end_offset = FLASH_OFFSET_DB_END; + + if (dev_db_start_offset > dev_db_end_offset) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s:Invalid DDB index %d", __func__, + ddb_index)); + goto exit_bootdb_failed; + } + + if (qla4xxx_get_flash(ha, fw_ddb_entry_dma, dev_db_start_offset, + sizeof(*fw_ddb_entry)) != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" + "failed\n", ha->host_no, __func__); + goto exit_bootdb_failed; + } + + if (fw_ddb_entry->cookie == DDB_VALID_COOKIE) + status = QLA_SUCCESS; + +exit_bootdb_failed: + return status; +} + +int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password, + uint16_t idx) +{ + int ret = 0; + int rval = QLA_ERROR; + uint32_t offset = 0, chap_size; + struct ql4_chap_table *chap_table; + dma_addr_t chap_dma; + + chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); + if (chap_table == NULL) { + ret = -ENOMEM; + goto exit_get_chap; + } + + chap_size = sizeof(struct ql4_chap_table); + memset(chap_table, 0, chap_size); + + if (is_qla40XX(ha)) + offset = FLASH_CHAP_OFFSET | (idx * chap_size); + else { + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + /* flt_chap_size is CHAP table size for both ports + * so divide it by 2 to calculate the offset for second port + */ + if (ha->port_num == 1) + offset += (ha->hw.flt_chap_size / 2); + offset += (idx * chap_size); + } + + rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); + if (rval != QLA_SUCCESS) { + ret = -EINVAL; + goto exit_get_chap; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "Chap Cookie: x%x\n", + __le16_to_cpu(chap_table->cookie))); + + if (__le16_to_cpu(chap_table->cookie) != CHAP_VALID_COOKIE) { + ql4_printk(KERN_ERR, ha, "No valid chap entry found\n"); + goto exit_get_chap; + } + + strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); + strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); + chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); + +exit_get_chap: + dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); + return ret; +} + +static int qla4xxx_set_chap(struct scsi_qla_host *ha, char *username, + char *password, uint16_t idx, 
int bidi) +{ + int ret = 0; + int rval = QLA_ERROR; + uint32_t offset = 0; + struct ql4_chap_table *chap_table; + dma_addr_t chap_dma; + + chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma); + if (chap_table == NULL) { + ret = -ENOMEM; + goto exit_set_chap; + } + + memset(chap_table, 0, sizeof(struct ql4_chap_table)); + if (bidi) + chap_table->flags |= BIT_6; /* peer */ + else + chap_table->flags |= BIT_7; /* local */ + chap_table->secret_len = strlen(password); + strncpy(chap_table->secret, password, MAX_CHAP_SECRET_LEN); + strncpy(chap_table->name, username, MAX_CHAP_NAME_LEN); + chap_table->cookie = __constant_cpu_to_le16(CHAP_VALID_COOKIE); + offset = FLASH_CHAP_OFFSET | (idx * sizeof(struct ql4_chap_table)); + rval = qla4xxx_set_flash(ha, chap_dma, offset, + sizeof(struct ql4_chap_table), + FLASH_OPT_RMW_COMMIT); + + if (rval == QLA_SUCCESS && ha->chap_list) { + /* Update ha chap_list cache */ + memcpy((struct ql4_chap_table *)ha->chap_list + idx, + chap_table, sizeof(struct ql4_chap_table)); + } + dma_pool_free(ha->chap_dma_pool, chap_table, chap_dma); + if (rval != QLA_SUCCESS) + ret = -EINVAL; + +exit_set_chap: + return ret; +} + +/** + * qla4xxx_get_chap_index - Get chap index given username and secret + * @ha: pointer to adapter structure + * @username: CHAP username to be searched + * @password: CHAP password to be searched + * @bidi: Is this a BIDI CHAP + * @chap_index: CHAP index to be returned + * + * Match the username and password in the chap_list, return the index if a + * match is found. If a match is not found then add the entry in FLASH and + * return the index at which entry is written in the FLASH. + **/ +static int qla4xxx_get_chap_index(struct scsi_qla_host *ha, char *username, + char *password, int bidi, uint16_t *chap_index) +{ + int i, rval; + int free_index = -1; + int found_index = 0; + int max_chap_entries = 0; + struct ql4_chap_table *chap_table; + + if (is_qla8022(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); + return QLA_ERROR; + } + + mutex_lock(&ha->chap_sem); + for (i = 0; i < max_chap_entries; i++) { + chap_table = (struct ql4_chap_table *)ha->chap_list + i; + if (chap_table->cookie != + __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + if (i > MAX_RESRV_CHAP_IDX && free_index == -1) + free_index = i; + continue; + } + if (bidi) { + if (chap_table->flags & BIT_7) + continue; } else { - DEBUG2(printk("scsi%ld: %s: failed status %04X\n", - ha->host_no, __func__, mbox_sts[0])); - return QLA_ERROR; + if (chap_table->flags & BIT_6) + continue; + } + if (!strncmp(chap_table->secret, password, + MAX_CHAP_SECRET_LEN) && + !strncmp(chap_table->name, username, + MAX_CHAP_NAME_LEN)) { + *chap_index = i; + found_index = 1; + break; } - } else { - *ddb_index = MAX_PRST_DEV_DB_ENTRIES; } - return QLA_SUCCESS; + /* If chap entry is not present and a free index is available then + * write the entry in flash + */ + if (!found_index && free_index != -1) { + rval = qla4xxx_set_chap(ha, username, password, + free_index, bidi); + if (!rval) { + *chap_index = free_index; + found_index = 1; + } + } + + mutex_unlock(&ha->chap_sem); + + if (found_index) + return QLA_SUCCESS; + return QLA_ERROR; } +int qla4xxx_conn_close_sess_logout(struct scsi_qla_host *ha, + uint16_t fw_ddb_index, + uint16_t connection_id, + uint16_t option) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t 
mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT; + mbox_cmd[1] = fw_ddb_index; + mbox_cmd[2] = connection_id; + mbox_cmd[3] = option; -int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port) + status = qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_CONN_CLOSE " + "option %04x failed w/ status %04X %04X\n", + __func__, option, mbox_sts[0], mbox_sts[1])); + } + return status; +} + +int qla4xxx_disable_acb(struct scsi_qla_host *ha) { - struct dev_db_entry *fw_ddb_entry; - dma_addr_t fw_ddb_entry_dma; - uint32_t ddb_index; - int ret_val = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_DISABLE_ACB; + status = qla4xxx_mailbox_command(ha, 8, 5, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_DISABLE_ACB " + "failed w/ status %04X %04X %04X", __func__, + mbox_sts[0], mbox_sts[1], mbox_sts[2])); + } + return status; +} + +int qla4xxx_get_acb(struct scsi_qla_host *ha, dma_addr_t acb_dma, + uint32_t acb_type, uint32_t len) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; - fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, - sizeof(*fw_ddb_entry), + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_ACB; + mbox_cmd[1] = acb_type; + mbox_cmd[2] = LSDW(acb_dma); + mbox_cmd[3] = MSDW(acb_dma); + mbox_cmd[4] = len; + + status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_GET_ACB " + "failed w/ status %04X\n", __func__, + mbox_sts[0])); + } + return status; +} + +int qla4xxx_set_acb(struct scsi_qla_host *ha, uint32_t *mbox_cmd, + uint32_t *mbox_sts, dma_addr_t acb_dma) +{ + int status = QLA_SUCCESS; + + memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); + memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); + mbox_cmd[0] = MBOX_CMD_SET_ACB; + mbox_cmd[1] = 0; /* Primary ACB */ + mbox_cmd[2] = LSDW(acb_dma); + mbox_cmd[3] = MSDW(acb_dma); + mbox_cmd[4] = sizeof(struct addr_ctrl_blk); + + status = qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: MBOX_CMD_SET_ACB " + "failed w/ status %04X\n", __func__, + mbox_sts[0])); + } + return status; +} + +int qla4xxx_set_param_ddbentry(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry, + struct iscsi_cls_conn *cls_conn, + uint32_t *mbx_sts) +{ + struct dev_db_entry *fw_ddb_entry; + struct iscsi_conn *conn; + struct iscsi_session *sess; + struct qla_conn *qla_conn; + struct sockaddr *dst_addr; + dma_addr_t fw_ddb_entry_dma; + int status = QLA_SUCCESS; + int rval = 0; + struct sockaddr_in *addr; + struct sockaddr_in6 *addr6; + char *ip; + uint16_t iscsi_opts = 0; + uint32_t options = 0; + uint16_t idx; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (!fw_ddb_entry) { - DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", - ha->host_no, __func__)); - ret_val = QLA_ERROR; - goto 
exit_send_tgts_no_free; + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer.\n", + __func__)); + rval = -ENOMEM; + goto exit_set_param_no_free; } - ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma); - if (ret_val != QLA_SUCCESS) - goto exit_send_tgts; + conn = cls_conn->dd_data; + qla_conn = conn->dd_data; + sess = conn->session; + dst_addr = &qla_conn->qla_ep->dst_addr; - ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index); - if (ret_val != QLA_SUCCESS) - goto exit_send_tgts; + if (dst_addr->sa_family == AF_INET6) + options |= IPV6_DEFAULT_DDB_ENTRY; - memset(fw_ddb_entry->iscsi_alias, 0, - sizeof(fw_ddb_entry->iscsi_alias)); + status = qla4xxx_get_default_ddb(ha, options, fw_ddb_entry_dma); + if (status == QLA_ERROR) { + rval = -EINVAL; + goto exit_set_param; + } - memset(fw_ddb_entry->iscsi_name, 0, - sizeof(fw_ddb_entry->iscsi_name)); + iscsi_opts = le16_to_cpu(fw_ddb_entry->iscsi_options); + memset(fw_ddb_entry->iscsi_alias, 0, sizeof(fw_ddb_entry->iscsi_alias)); + + memset(fw_ddb_entry->iscsi_name, 0, sizeof(fw_ddb_entry->iscsi_name)); + + if (sess->targetname != NULL) { + memcpy(fw_ddb_entry->iscsi_name, sess->targetname, + min(strlen(sess->targetname), + sizeof(fw_ddb_entry->iscsi_name))); + } memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr)); - memset(fw_ddb_entry->tgt_addr, 0, - sizeof(fw_ddb_entry->tgt_addr)); + memset(fw_ddb_entry->tgt_addr, 0, sizeof(fw_ddb_entry->tgt_addr)); + + fw_ddb_entry->options = DDB_OPT_TARGET | DDB_OPT_AUTO_SENDTGTS_DISABLE; + + if (dst_addr->sa_family == AF_INET) { + addr = (struct sockaddr_in *)dst_addr; + ip = (char *)&addr->sin_addr; + memcpy(fw_ddb_entry->ip_addr, ip, IP_ADDR_LEN); + fw_ddb_entry->port = cpu_to_le16(ntohs(addr->sin_port)); + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Destination Address [%pI4]: index [%d]\n", + __func__, fw_ddb_entry->ip_addr, + ddb_entry->fw_ddb_index)); + } else if (dst_addr->sa_family == AF_INET6) { + addr6 = (struct sockaddr_in6 *)dst_addr; + ip = (char *)&addr6->sin6_addr; + memcpy(fw_ddb_entry->ip_addr, ip, IPv6_ADDR_LEN); + fw_ddb_entry->port = cpu_to_le16(ntohs(addr6->sin6_port)); + fw_ddb_entry->options |= DDB_OPT_IPV6_DEVICE; + DEBUG2(ql4_printk(KERN_INFO, ha, + "%s: Destination Address [%pI6]: index [%d]\n", + __func__, fw_ddb_entry->ip_addr, + ddb_entry->fw_ddb_index)); + } else { + ql4_printk(KERN_ERR, ha, + "%s: Failed to get IP Address\n", + __func__); + rval = -EINVAL; + goto exit_set_param; + } + + /* CHAP */ + if (sess->username != NULL && sess->password != NULL) { + if (strlen(sess->username) && strlen(sess->password)) { + iscsi_opts |= BIT_7; + + rval = qla4xxx_get_chap_index(ha, sess->username, + sess->password, + LOCAL_CHAP, &idx); + if (rval) + goto exit_set_param; + + fw_ddb_entry->chap_tbl_idx = cpu_to_le16(idx); + } + } + + if (sess->username_in != NULL && sess->password_in != NULL) { + /* Check if BIDI CHAP */ + if (strlen(sess->username_in) && strlen(sess->password_in)) { + iscsi_opts |= BIT_4; + + rval = qla4xxx_get_chap_index(ha, sess->username_in, + sess->password_in, + BIDI_CHAP, &idx); + if (rval) + goto exit_set_param; + } + } + + if (sess->initial_r2t_en) + iscsi_opts |= BIT_10; + + if (sess->imm_data_en) + iscsi_opts |= BIT_11; + + fw_ddb_entry->iscsi_options = cpu_to_le16(iscsi_opts); + + if (conn->max_recv_dlength) + fw_ddb_entry->iscsi_max_rcv_data_seg_len = + __constant_cpu_to_le16((conn->max_recv_dlength / BYTE_UNITS)); - fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET); - fw_ddb_entry->port = cpu_to_le16(ntohs(port)); + if 
(sess->max_r2t) + fw_ddb_entry->iscsi_max_outsnd_r2t = cpu_to_le16(sess->max_r2t); - fw_ddb_entry->ip_addr[0] = *ip; - fw_ddb_entry->ip_addr[1] = *(ip + 1); - fw_ddb_entry->ip_addr[2] = *(ip + 2); - fw_ddb_entry->ip_addr[3] = *(ip + 3); + if (sess->first_burst) + fw_ddb_entry->iscsi_first_burst_len = + __constant_cpu_to_le16((sess->first_burst / BYTE_UNITS)); - ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma); + if (sess->max_burst) + fw_ddb_entry->iscsi_max_burst_len = + __constant_cpu_to_le16((sess->max_burst / BYTE_UNITS)); -exit_send_tgts: + if (sess->time2wait) + fw_ddb_entry->iscsi_def_time2wait = + cpu_to_le16(sess->time2wait); + + if (sess->time2retain) + fw_ddb_entry->iscsi_def_time2retain = + cpu_to_le16(sess->time2retain); + + status = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, + fw_ddb_entry_dma, mbx_sts); + + if (status != QLA_SUCCESS) + rval = -EINVAL; +exit_set_param: dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); -exit_send_tgts_no_free: - return ret_val; +exit_set_param_no_free: + return rval; +} + +int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index, + uint16_t stats_size, dma_addr_t stats_dma) +{ + int status = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT); + memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT); + mbox_cmd[0] = MBOX_CMD_GET_MANAGEMENT_DATA; + mbox_cmd[1] = fw_ddb_index; + mbox_cmd[2] = LSDW(stats_dma); + mbox_cmd[3] = MSDW(stats_dma); + mbox_cmd[4] = stats_size; + + status = qla4xxx_mailbox_command(ha, 5, 1, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, + "%s: MBOX_CMD_GET_MANAGEMENT_DATA " + "failed w/ status %04X\n", __func__, + mbox_sts[0])); + } + return status; } +int qla4xxx_get_ip_state(struct scsi_qla_host *ha, uint32_t acb_idx, + uint32_t ip_idx, uint32_t *sts) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + int status = QLA_SUCCESS; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + mbox_cmd[0] = MBOX_CMD_GET_IP_ADDR_STATE; + mbox_cmd[1] = acb_idx; + mbox_cmd[2] = ip_idx; + + status = qla4xxx_mailbox_command(ha, 3, 8, &mbox_cmd[0], &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: " + "MBOX_CMD_GET_IP_ADDR_STATE failed w/ " + "status %04X\n", __func__, mbox_sts[0])); + } + memcpy(sts, mbox_sts, sizeof(mbox_sts)); + return status; +} + +int qla4xxx_get_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma, + uint32_t offset, uint32_t size) +{ + int status = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_GET_NVRAM; + mbox_cmd[1] = LSDW(nvram_dma); + mbox_cmd[2] = MSDW(nvram_dma); + mbox_cmd[3] = offset; + mbox_cmd[4] = size; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "status %04X\n", ha->host_no, __func__, + mbox_sts[0])); + } + return status; +} + +int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma, + uint32_t offset, uint32_t size) +{ + int status = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + 
memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_SET_NVRAM; + mbox_cmd[1] = LSDW(nvram_dma); + mbox_cmd[2] = MSDW(nvram_dma); + mbox_cmd[3] = offset; + mbox_cmd[4] = size; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "status %04X\n", ha->host_no, __func__, + mbox_sts[0])); + } + return status; +} + +int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha, + uint32_t region, uint32_t field0, + uint32_t field1) +{ + int status = QLA_SUCCESS; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + mbox_cmd[0] = MBOX_CMD_RESTORE_FACTORY_DEFAULTS; + mbox_cmd[3] = region; + mbox_cmd[4] = field0; + mbox_cmd[5] = field1; + + status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], + &mbox_sts[0]); + if (status != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "status %04X\n", ha->host_no, __func__, + mbox_sts[0])); + } + return status; +} diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c index b4b859b2d47e..7851f314ba96 100644 --- a/drivers/scsi/qla4xxx/ql4_nvram.c +++ b/drivers/scsi/qla4xxx/ql4_nvram.c @@ -156,6 +156,27 @@ u16 rd_nvram_word(struct scsi_qla_host * ha, int offset) return val; } +u8 rd_nvram_byte(struct scsi_qla_host *ha, int offset) +{ + u16 val = 0; + u8 rval = 0; + int index = 0; + + if (offset & 0x1) + index = (offset - 1) / 2; + else + index = offset / 2; + + val = le16_to_cpu(rd_nvram_word(ha, index)); + + if (offset & 0x1) + rval = (u8)((val & 0xff00) >> 8); + else + rval = (u8)((val & 0x00ff)); + + return rval; +} + int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha) { int status = QLA_ERROR; diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index fdfe27b38698..f484ff438199 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -2015,11 +2015,19 @@ qla4_8xxx_get_flt_info(struct scsi_qla_host *ha, uint32_t flt_addr) hw->flt_region_boot = start; break; case FLT_REG_FW_82: + case FLT_REG_FW_82_1: hw->flt_region_fw = start; break; case FLT_REG_BOOTLOAD_82: hw->flt_region_bootload = start; break; + case FLT_REG_ISCSI_PARAM: + hw->flt_iscsi_param = start; + break; + case FLT_REG_ISCSI_CHAP: + hw->flt_region_chap = start; + hw->flt_chap_size = le32_to_cpu(region->size); + break; } } goto done; @@ -2032,6 +2040,9 @@ no_flash_data: hw->flt_region_boot = FA_BOOT_CODE_ADDR_82; hw->flt_region_bootload = FA_BOOT_LOAD_ADDR_82; hw->flt_region_fw = FA_RISC_CODE_ADDR_82; + hw->flt_region_chap = FA_FLASH_ISCSI_CHAP; + hw->flt_chap_size = FA_FLASH_CHAP_SIZE; + done: DEBUG2(ql4_printk(KERN_INFO, ha, "FLT[%s]: flt=0x%x fdt=0x%x " "boot=0x%x bootload=0x%x fw=0x%x\n", loc, hw->flt_region_flt, @@ -2258,10 +2269,16 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha) } /* Save M.A.C. 
address & serial_number */ + ha->port_num = sys_info->port_num; memcpy(ha->my_mac, &sys_info->mac_addr[0], min(sizeof(ha->my_mac), sizeof(sys_info->mac_addr))); memcpy(ha->serial_number, &sys_info->serial_number, min(sizeof(ha->serial_number), sizeof(sys_info->serial_number))); + memcpy(ha->model_name, &sys_info->board_id_str, + min(sizeof(ha->model_name), sizeof(sys_info->board_id_str))); + ha->phy_port_cnt = sys_info->phys_port_cnt; + ha->phy_port_num = sys_info->port_num; + ha->iscsi_pci_func_cnt = sys_info->iscsi_pci_func_cnt; DEBUG2(printk("scsi%ld: %s: " "mac %02x:%02x:%02x:%02x:%02x:%02x " diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index f2364ec59f03..30f31b127f33 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -6,6 +6,8 @@ */ #include <linux/moduleparam.h> #include <linux/slab.h> +#include <linux/blkdev.h> +#include <linux/iscsi_boot_sysfs.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> @@ -63,6 +65,7 @@ MODULE_PARM_DESC(ql4xsess_recovery_tmo, "Target Session Recovery Timeout.\n" " Default: 30 sec."); +static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); /* * SCSI host template entry points */ @@ -71,18 +74,41 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha); /* * iSCSI template entry points */ -static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost, - enum iscsi_tgt_dscvr type, uint32_t enable, - struct sockaddr *dst_addr); static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, enum iscsi_param param, char *buf); -static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess, - enum iscsi_param param, char *buf); static int qla4xxx_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf); -static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session); +static int qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, + uint32_t len); +static int qla4xxx_get_iface_param(struct iscsi_iface *iface, + enum iscsi_param_type param_type, + int param, char *buf); static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc); - +static struct iscsi_endpoint *qla4xxx_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking); +static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms); +static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep); +static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, + enum iscsi_param param, char *buf); +static int qla4xxx_conn_start(struct iscsi_cls_conn *conn); +static struct iscsi_cls_conn * +qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx); +static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_fd, int is_leading); +static void qla4xxx_conn_destroy(struct iscsi_cls_conn *conn); +static struct iscsi_cls_session * +qla4xxx_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max, + uint16_t qdepth, uint32_t initial_cmdsn); +static void qla4xxx_session_destroy(struct iscsi_cls_session *sess); +static void qla4xxx_task_work(struct work_struct *wdata); +static int qla4xxx_alloc_pdu(struct iscsi_task *, uint8_t); +static int qla4xxx_task_xmit(struct iscsi_task *); +static void qla4xxx_task_cleanup(struct iscsi_task *); +static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session); +static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats); /* * SCSI host template entry points */ @@ -94,7 +120,8 @@ 
static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd); static int qla4xxx_slave_alloc(struct scsi_device *device); static int qla4xxx_slave_configure(struct scsi_device *device); static void qla4xxx_slave_destroy(struct scsi_device *sdev); -static void qla4xxx_scan_start(struct Scsi_Host *shost); +static mode_t ql4_attr_is_visible(int param_type, int param); +static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); static struct qla4_8xxx_legacy_intr_set legacy_intr[] = QLA82XX_LEGACY_INTR_CONFIG; @@ -115,9 +142,6 @@ static struct scsi_host_template qla4xxx_driver_template = { .slave_alloc = qla4xxx_slave_alloc, .slave_destroy = qla4xxx_slave_destroy, - .scan_finished = iscsi_scan_finished, - .scan_start = qla4xxx_scan_start, - .this_id = -1, .cmd_per_lun = 3, .use_clustering = ENABLE_CLUSTERING, @@ -125,58 +149,396 @@ static struct scsi_host_template qla4xxx_driver_template = { .max_sectors = 0xFFFF, .shost_attrs = qla4xxx_host_attrs, + .host_reset = qla4xxx_host_reset, + .vendor_id = SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_QLOGIC, }; static struct iscsi_transport qla4xxx_iscsi_transport = { .owner = THIS_MODULE, .name = DRIVER_NAME, - .caps = CAP_FW_DB | CAP_SENDTARGETS_OFFLOAD | - CAP_DATA_PATH_OFFLOAD, - .param_mask = ISCSI_CONN_PORT | ISCSI_CONN_ADDRESS | - ISCSI_TARGET_NAME | ISCSI_TPGT | - ISCSI_TARGET_ALIAS, - .host_param_mask = ISCSI_HOST_HWADDRESS | - ISCSI_HOST_IPADDRESS | - ISCSI_HOST_INITIATOR_NAME, - .tgt_dscvr = qla4xxx_tgt_dscvr, + .caps = CAP_TEXT_NEGO | + CAP_DATA_PATH_OFFLOAD | CAP_HDRDGST | + CAP_DATADGST | CAP_LOGIN_OFFLOAD | + CAP_MULTI_R2T, + .attr_is_visible = ql4_attr_is_visible, + .create_session = qla4xxx_session_create, + .destroy_session = qla4xxx_session_destroy, + .start_conn = qla4xxx_conn_start, + .create_conn = qla4xxx_conn_create, + .bind_conn = qla4xxx_conn_bind, + .stop_conn = iscsi_conn_stop, + .destroy_conn = qla4xxx_conn_destroy, + .set_param = iscsi_set_param, .get_conn_param = qla4xxx_conn_get_param, - .get_session_param = qla4xxx_sess_get_param, + .get_session_param = iscsi_session_get_param, + .get_ep_param = qla4xxx_get_ep_param, + .ep_connect = qla4xxx_ep_connect, + .ep_poll = qla4xxx_ep_poll, + .ep_disconnect = qla4xxx_ep_disconnect, + .get_stats = qla4xxx_conn_get_stats, + .send_pdu = iscsi_conn_send_pdu, + .xmit_task = qla4xxx_task_xmit, + .cleanup_task = qla4xxx_task_cleanup, + .alloc_pdu = qla4xxx_alloc_pdu, + .get_host_param = qla4xxx_host_get_param, - .session_recovery_timedout = qla4xxx_recovery_timedout, + .set_iface_param = qla4xxx_iface_set_param, + .get_iface_param = qla4xxx_get_iface_param, + .bsg_request = qla4xxx_bsg_request, }; static struct scsi_transport_template *qla4xxx_scsi_transport; -static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc) +static mode_t ql4_attr_is_visible(int param_type, int param) { - struct iscsi_cls_session *session; - struct ddb_entry *ddb_entry; + switch (param_type) { + case ISCSI_HOST_PARAM: + switch (param) { + case ISCSI_HOST_PARAM_HWADDRESS: + case ISCSI_HOST_PARAM_IPADDRESS: + case ISCSI_HOST_PARAM_INITIATOR_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_PARAM: + switch (param) { + case ISCSI_PARAM_PERSISTENT_ADDRESS: + case ISCSI_PARAM_PERSISTENT_PORT: + case ISCSI_PARAM_CONN_ADDRESS: + case ISCSI_PARAM_CONN_PORT: + case ISCSI_PARAM_TARGET_NAME: + case ISCSI_PARAM_TPGT: + case ISCSI_PARAM_TARGET_ALIAS: + case ISCSI_PARAM_MAX_BURST: + case ISCSI_PARAM_MAX_R2T: + case ISCSI_PARAM_FIRST_BURST: + case ISCSI_PARAM_MAX_RECV_DLENGTH: + case 
ISCSI_PARAM_MAX_XMIT_DLENGTH: + case ISCSI_PARAM_IFACE_NAME: + return S_IRUGO; + default: + return 0; + } + case ISCSI_NET_PARAM: + switch (param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + case ISCSI_NET_PARAM_IPV4_SUBNET: + case ISCSI_NET_PARAM_IPV4_GW: + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + case ISCSI_NET_PARAM_IFACE_ENABLE: + case ISCSI_NET_PARAM_IPV6_LINKLOCAL: + case ISCSI_NET_PARAM_IPV6_ADDR: + case ISCSI_NET_PARAM_IPV6_ROUTER: + case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: + case ISCSI_NET_PARAM_VLAN_ID: + case ISCSI_NET_PARAM_VLAN_PRIORITY: + case ISCSI_NET_PARAM_VLAN_ENABLED: + case ISCSI_NET_PARAM_MTU: + case ISCSI_NET_PARAM_PORT: + return S_IRUGO; + default: + return 0; + } + } - session = starget_to_session(scsi_target(sc->device)); - ddb_entry = session->dd_data; + return 0; +} - /* if we are not logged in then the LLD is going to clean up the cmd */ - if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) - return BLK_EH_RESET_TIMER; - else - return BLK_EH_NOT_HANDLED; +static int qla4xxx_get_iface_param(struct iscsi_iface *iface, + enum iscsi_param_type param_type, + int param, char *buf) +{ + struct Scsi_Host *shost = iscsi_iface_to_shost(iface); + struct scsi_qla_host *ha = to_qla_host(shost); + int len = -ENOSYS; + + if (param_type != ISCSI_NET_PARAM) + return -ENOSYS; + + switch (param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); + break; + case ISCSI_NET_PARAM_IPV4_SUBNET: + len = sprintf(buf, "%pI4\n", &ha->ip_config.subnet_mask); + break; + case ISCSI_NET_PARAM_IPV4_GW: + len = sprintf(buf, "%pI4\n", &ha->ip_config.gateway); + break; + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%s\n", + (ha->ip_config.ipv4_options & + IPOPT_IPV4_PROTOCOL_ENABLE) ? + "enabled" : "disabled"); + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + len = sprintf(buf, "%s\n", + (ha->ip_config.ipv6_options & + IPV6_OPT_IPV6_PROTOCOL_ENABLE) ? + "enabled" : "disabled"); + break; + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + len = sprintf(buf, "%s\n", + (ha->ip_config.tcp_options & TCPOPT_DHCP_ENABLE) ? + "dhcp" : "static"); + break; + case ISCSI_NET_PARAM_IPV6_ADDR: + if (iface->iface_num == 0) + len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr0); + if (iface->iface_num == 1) + len = sprintf(buf, "%pI6\n", &ha->ip_config.ipv6_addr1); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL: + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_link_local_addr); + break; + case ISCSI_NET_PARAM_IPV6_ROUTER: + len = sprintf(buf, "%pI6\n", + &ha->ip_config.ipv6_default_router_addr); + break; + case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: + len = sprintf(buf, "%s\n", + (ha->ip_config.ipv6_addl_options & + IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) ? + "nd" : "static"); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: + len = sprintf(buf, "%s\n", + (ha->ip_config.ipv6_addl_options & + IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR) ? 
+ "auto" : "static"); + break; + case ISCSI_NET_PARAM_VLAN_ID: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", + (ha->ip_config.ipv4_vlan_tag & + ISCSI_MAX_VLAN_ID)); + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + len = sprintf(buf, "%d\n", + (ha->ip_config.ipv6_vlan_tag & + ISCSI_MAX_VLAN_ID)); + break; + case ISCSI_NET_PARAM_VLAN_PRIORITY: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", + ((ha->ip_config.ipv4_vlan_tag >> 13) & + ISCSI_MAX_VLAN_PRIORITY)); + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + len = sprintf(buf, "%d\n", + ((ha->ip_config.ipv6_vlan_tag >> 13) & + ISCSI_MAX_VLAN_PRIORITY)); + break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%s\n", + (ha->ip_config.ipv4_options & + IPOPT_VLAN_TAGGING_ENABLE) ? + "enabled" : "disabled"); + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + len = sprintf(buf, "%s\n", + (ha->ip_config.ipv6_options & + IPV6_OPT_VLAN_TAGGING_ENABLE) ? + "enabled" : "disabled"); + break; + case ISCSI_NET_PARAM_MTU: + len = sprintf(buf, "%d\n", ha->ip_config.eth_mtu_size); + break; + case ISCSI_NET_PARAM_PORT: + if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) + len = sprintf(buf, "%d\n", ha->ip_config.ipv4_port); + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) + len = sprintf(buf, "%d\n", ha->ip_config.ipv6_port); + break; + default: + len = -ENOSYS; + } + + return len; } -static void qla4xxx_recovery_timedout(struct iscsi_cls_session *session) +static struct iscsi_endpoint * +qla4xxx_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, + int non_blocking) { - struct ddb_entry *ddb_entry = session->dd_data; - struct scsi_qla_host *ha = ddb_entry->ha; + int ret; + struct iscsi_endpoint *ep; + struct qla_endpoint *qla_ep; + struct scsi_qla_host *ha; + struct sockaddr_in *addr; + struct sockaddr_in6 *addr6; + + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + if (!shost) { + ret = -ENXIO; + printk(KERN_ERR "%s: shost is NULL\n", + __func__); + return ERR_PTR(ret); + } + + ha = iscsi_host_priv(shost); + + ep = iscsi_create_endpoint(sizeof(struct qla_endpoint)); + if (!ep) { + ret = -ENOMEM; + return ERR_PTR(ret); + } + + qla_ep = ep->dd_data; + memset(qla_ep, 0, sizeof(struct qla_endpoint)); + if (dst_addr->sa_family == AF_INET) { + memcpy(&qla_ep->dst_addr, dst_addr, sizeof(struct sockaddr_in)); + addr = (struct sockaddr_in *)&qla_ep->dst_addr; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI4\n", __func__, + (char *)&addr->sin_addr)); + } else if (dst_addr->sa_family == AF_INET6) { + memcpy(&qla_ep->dst_addr, dst_addr, + sizeof(struct sockaddr_in6)); + addr6 = (struct sockaddr_in6 *)&qla_ep->dst_addr; + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: %pI6\n", __func__, + (char *)&addr6->sin6_addr)); + } + + qla_ep->host = shost; + + return ep; +} + +static int qla4xxx_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +{ + struct qla_endpoint *qla_ep; + struct scsi_qla_host *ha; + int ret = 0; + + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + qla_ep = ep->dd_data; + ha = to_qla_host(qla_ep->host); + + if (adapter_up(ha)) + ret = 1; + + return ret; +} + +static void qla4xxx_ep_disconnect(struct iscsi_endpoint *ep) +{ + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + iscsi_destroy_endpoint(ep); +} + +static int qla4xxx_get_ep_param(struct iscsi_endpoint *ep, + enum iscsi_param param, + char *buf) +{ + struct qla_endpoint *qla_ep = ep->dd_data; + struct sockaddr *dst_addr; - if 
(atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) {
- atomic_set(&ddb_entry->state, DDB_STATE_DEAD);
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));

- DEBUG2(printk("scsi%ld: %s: ddb [%d] session recovery timeout "
- "of (%d) secs exhausted, marking device DEAD.\n",
- ha->host_no, __func__, ddb_entry->fw_ddb_index,
- ddb_entry->sess->recovery_tmo));
+ switch (param) {
+ case ISCSI_PARAM_CONN_PORT:
+ case ISCSI_PARAM_CONN_ADDRESS:
+ if (!qla_ep)
+ return -ENOTCONN;
+
+ dst_addr = (struct sockaddr *)&qla_ep->dst_addr;
+ if (!dst_addr)
+ return -ENOTCONN;
+
+ return iscsi_conn_get_addr_param((struct sockaddr_storage *)
+ &qla_ep->dst_addr, param, buf);
+ default:
+ return -ENOSYS;
}
}

+static void qla4xxx_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+ struct iscsi_stats *stats)
+{
+ struct iscsi_session *sess;
+ struct iscsi_cls_session *cls_sess;
+ struct ddb_entry *ddb_entry;
+ struct scsi_qla_host *ha;
+ struct ql_iscsi_stats *ql_iscsi_stats;
+ int stats_size;
+ int ret;
+ dma_addr_t iscsi_stats_dma;
+
+ DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
+
+ cls_sess = iscsi_conn_to_session(cls_conn);
+ sess = cls_sess->dd_data;
+ ddb_entry = sess->dd_data;
+ ha = ddb_entry->ha;
+
+ stats_size = PAGE_ALIGN(sizeof(struct ql_iscsi_stats));
+ /* Allocate memory */
+ ql_iscsi_stats = dma_alloc_coherent(&ha->pdev->dev, stats_size,
+ &iscsi_stats_dma, GFP_KERNEL);
+ if (!ql_iscsi_stats) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to allocate memory for iscsi stats\n");
+ goto exit_get_stats;
+ }
+
+ ret = qla4xxx_get_mgmt_data(ha, ddb_entry->fw_ddb_index, stats_size,
+ iscsi_stats_dma);
+ if (ret != QLA_SUCCESS) {
+ ql4_printk(KERN_ERR, ha,
+ "Unable to retrieve iscsi stats\n");
+ goto free_stats;
+ }
+
+ /* octets */
+ stats->txdata_octets = le64_to_cpu(ql_iscsi_stats->tx_data_octets);
+ stats->rxdata_octets = le64_to_cpu(ql_iscsi_stats->rx_data_octets);
+ /* xmit pdus */
+ stats->noptx_pdus = le32_to_cpu(ql_iscsi_stats->tx_nopout_pdus);
+ stats->scsicmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_cmd_pdus);
+ stats->tmfcmd_pdus = le32_to_cpu(ql_iscsi_stats->tx_tmf_cmd_pdus);
+ stats->login_pdus = le32_to_cpu(ql_iscsi_stats->tx_login_cmd_pdus);
+ stats->text_pdus = le32_to_cpu(ql_iscsi_stats->tx_text_cmd_pdus);
+ stats->dataout_pdus = le32_to_cpu(ql_iscsi_stats->tx_scsi_write_pdus);
+ stats->logout_pdus = le32_to_cpu(ql_iscsi_stats->tx_logout_cmd_pdus);
+ stats->snack_pdus = le32_to_cpu(ql_iscsi_stats->tx_snack_req_pdus);
+ /* recv pdus */
+ stats->noprx_pdus = le32_to_cpu(ql_iscsi_stats->rx_nopin_pdus);
+ stats->scsirsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_resp_pdus);
+ stats->tmfrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_tmf_resp_pdus);
+ stats->textrsp_pdus = le32_to_cpu(ql_iscsi_stats->rx_text_resp_pdus);
+ stats->datain_pdus = le32_to_cpu(ql_iscsi_stats->rx_scsi_read_pdus);
+ stats->logoutrsp_pdus =
+ le32_to_cpu(ql_iscsi_stats->rx_logout_resp_pdus);
+ stats->r2t_pdus = le32_to_cpu(ql_iscsi_stats->rx_r2t_pdus);
+ stats->async_pdus = le32_to_cpu(ql_iscsi_stats->rx_async_pdus);
+ stats->rjt_pdus = le32_to_cpu(ql_iscsi_stats->rx_reject_pdus);
+
+free_stats:
+ dma_free_coherent(&ha->pdev->dev, stats_size, ql_iscsi_stats,
+ iscsi_stats_dma);
+exit_get_stats:
+ return;
+}
+
+static enum blk_eh_timer_return qla4xxx_eh_cmd_timed_out(struct scsi_cmnd *sc)
+{
+ struct iscsi_cls_session *session;
+ struct iscsi_session *sess;
+ unsigned long flags;
+ enum blk_eh_timer_return ret = BLK_EH_NOT_HANDLED;
+
+ session = starget_to_session(scsi_target(sc->device));
+ sess = session->dd_data;
+
+
spin_lock_irqsave(&session->lock, flags); + if (session->state == ISCSI_SESSION_FAILED) + ret = BLK_EH_RESET_TIMER; + spin_unlock_irqrestore(&session->lock, flags); + + return ret; +} + static int qla4xxx_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param, char *buf) { @@ -188,9 +550,7 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost, len = sysfs_format_mac(buf, ha->my_mac, MAC_ADDR_LEN); break; case ISCSI_HOST_PARAM_IPADDRESS: - len = sprintf(buf, "%d.%d.%d.%d\n", ha->ip_address[0], - ha->ip_address[1], ha->ip_address[2], - ha->ip_address[3]); + len = sprintf(buf, "%pI4\n", &ha->ip_config.ip_address); break; case ISCSI_HOST_PARAM_INITIATOR_NAME: len = sprintf(buf, "%s\n", ha->name_string); @@ -202,154 +562,851 @@ static int qla4xxx_host_get_param(struct Scsi_Host *shost, return len; } -static int qla4xxx_sess_get_param(struct iscsi_cls_session *sess, - enum iscsi_param param, char *buf) +static void qla4xxx_create_ipv4_iface(struct scsi_qla_host *ha) { - struct ddb_entry *ddb_entry = sess->dd_data; - int len; + if (ha->iface_ipv4) + return; - switch (param) { - case ISCSI_PARAM_TARGET_NAME: - len = snprintf(buf, PAGE_SIZE - 1, "%s\n", - ddb_entry->iscsi_name); + /* IPv4 */ + ha->iface_ipv4 = iscsi_create_iface(ha->host, + &qla4xxx_iscsi_transport, + ISCSI_IFACE_TYPE_IPV4, 0, 0); + if (!ha->iface_ipv4) + ql4_printk(KERN_ERR, ha, "Could not create IPv4 iSCSI " + "iface0.\n"); +} + +static void qla4xxx_create_ipv6_iface(struct scsi_qla_host *ha) +{ + if (!ha->iface_ipv6_0) + /* IPv6 iface-0 */ + ha->iface_ipv6_0 = iscsi_create_iface(ha->host, + &qla4xxx_iscsi_transport, + ISCSI_IFACE_TYPE_IPV6, 0, + 0); + if (!ha->iface_ipv6_0) + ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " + "iface0.\n"); + + if (!ha->iface_ipv6_1) + /* IPv6 iface-1 */ + ha->iface_ipv6_1 = iscsi_create_iface(ha->host, + &qla4xxx_iscsi_transport, + ISCSI_IFACE_TYPE_IPV6, 1, + 0); + if (!ha->iface_ipv6_1) + ql4_printk(KERN_ERR, ha, "Could not create IPv6 iSCSI " + "iface1.\n"); +} + +static void qla4xxx_create_ifaces(struct scsi_qla_host *ha) +{ + if (ha->ip_config.ipv4_options & IPOPT_IPV4_PROTOCOL_ENABLE) + qla4xxx_create_ipv4_iface(ha); + + if (ha->ip_config.ipv6_options & IPV6_OPT_IPV6_PROTOCOL_ENABLE) + qla4xxx_create_ipv6_iface(ha); +} + +static void qla4xxx_destroy_ipv4_iface(struct scsi_qla_host *ha) +{ + if (ha->iface_ipv4) { + iscsi_destroy_iface(ha->iface_ipv4); + ha->iface_ipv4 = NULL; + } +} + +static void qla4xxx_destroy_ipv6_iface(struct scsi_qla_host *ha) +{ + if (ha->iface_ipv6_0) { + iscsi_destroy_iface(ha->iface_ipv6_0); + ha->iface_ipv6_0 = NULL; + } + if (ha->iface_ipv6_1) { + iscsi_destroy_iface(ha->iface_ipv6_1); + ha->iface_ipv6_1 = NULL; + } +} + +static void qla4xxx_destroy_ifaces(struct scsi_qla_host *ha) +{ + qla4xxx_destroy_ipv4_iface(ha); + qla4xxx_destroy_ipv6_iface(ha); +} + +static void qla4xxx_set_ipv6(struct scsi_qla_host *ha, + struct iscsi_iface_param_info *iface_param, + struct addr_ctrl_blk *init_fw_cb) +{ + /* + * iface_num 0 is valid for IPv6 Addr, linklocal, router, autocfg. + * iface_num 1 is valid only for IPv6 Addr. 
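/*
 * [Editor's sketch -- not part of the patch] The iface helpers above are
 * deliberately idempotent: create is a no-op while the handle is cached,
 * and destroy NULLs the pointer so a later create succeeds. The pattern
 * reduced to plain C (malloc/free stand in for iscsi_create_iface()/
 * iscsi_destroy_iface()):
 */
#include <stdlib.h>

struct iface { int type; };

static struct iface *iface_ipv4;      /* NULL while the iface is absent */

static void create_ipv4_iface(void)
{
    if (iface_ipv4)
        return;                       /* already created: do nothing */
    iface_ipv4 = malloc(sizeof(*iface_ipv4));
}

static void destroy_ipv4_iface(void)
{
    free(iface_ipv4);                 /* free(NULL) is a no-op */
    iface_ipv4 = NULL;                /* allow a later re-create */
}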
+ */ + switch (iface_param->param) { + case ISCSI_NET_PARAM_IPV6_ADDR: + if (iface_param->iface_num & 0x1) + /* IPv6 Addr 1 */ + memcpy(init_fw_cb->ipv6_addr1, iface_param->value, + sizeof(init_fw_cb->ipv6_addr1)); + else + /* IPv6 Addr 0 */ + memcpy(init_fw_cb->ipv6_addr0, iface_param->value, + sizeof(init_fw_cb->ipv6_addr0)); + break; + case ISCSI_NET_PARAM_IPV6_LINKLOCAL: + if (iface_param->iface_num & 0x1) + break; + memcpy(init_fw_cb->ipv6_if_id, &iface_param->value[8], + sizeof(init_fw_cb->ipv6_if_id)); + break; + case ISCSI_NET_PARAM_IPV6_ROUTER: + if (iface_param->iface_num & 0x1) + break; + memcpy(init_fw_cb->ipv6_dflt_rtr_addr, iface_param->value, + sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); + break; + case ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG: + /* Autocfg applies to even interface */ + if (iface_param->iface_num & 0x1) + break; + + if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_DISABLE) + init_fw_cb->ipv6_addtl_opts &= + cpu_to_le16( + ~IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); + else if (iface_param->value[0] == ISCSI_IPV6_AUTOCFG_ND_ENABLE) + init_fw_cb->ipv6_addtl_opts |= + cpu_to_le16( + IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE); + else + ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for " + "IPv6 addr\n"); break; - case ISCSI_PARAM_TPGT: - len = sprintf(buf, "%u\n", ddb_entry->tpgt); + case ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG: + /* Autocfg applies to even interface */ + if (iface_param->iface_num & 0x1) + break; + + if (iface_param->value[0] == + ISCSI_IPV6_LINKLOCAL_AUTOCFG_ENABLE) + init_fw_cb->ipv6_addtl_opts |= cpu_to_le16( + IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); + else if (iface_param->value[0] == + ISCSI_IPV6_LINKLOCAL_AUTOCFG_DISABLE) + init_fw_cb->ipv6_addtl_opts &= cpu_to_le16( + ~IPV6_ADDOPT_AUTOCONFIG_LINK_LOCAL_ADDR); + else + ql4_printk(KERN_ERR, ha, "Invalid autocfg setting for " + "IPv6 linklocal addr\n"); break; - case ISCSI_PARAM_TARGET_ALIAS: - len = snprintf(buf, PAGE_SIZE - 1, "%s\n", - ddb_entry->iscsi_alias); + case ISCSI_NET_PARAM_IPV6_ROUTER_AUTOCFG: + /* Autocfg applies to even interface */ + if (iface_param->iface_num & 0x1) + break; + + if (iface_param->value[0] == ISCSI_IPV6_ROUTER_AUTOCFG_ENABLE) + memset(init_fw_cb->ipv6_dflt_rtr_addr, 0, + sizeof(init_fw_cb->ipv6_dflt_rtr_addr)); + break; + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { + init_fw_cb->ipv6_opts |= + cpu_to_le16(IPV6_OPT_IPV6_PROTOCOL_ENABLE); + qla4xxx_create_ipv6_iface(ha); + } else { + init_fw_cb->ipv6_opts &= + cpu_to_le16(~IPV6_OPT_IPV6_PROTOCOL_ENABLE & + 0xFFFF); + qla4xxx_destroy_ipv6_iface(ha); + } + break; + case ISCSI_NET_PARAM_VLAN_TAG: + if (iface_param->len != sizeof(init_fw_cb->ipv6_vlan_tag)) + break; + init_fw_cb->ipv6_vlan_tag = + cpu_to_be16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + if (iface_param->value[0] == ISCSI_VLAN_ENABLE) + init_fw_cb->ipv6_opts |= + cpu_to_le16(IPV6_OPT_VLAN_TAGGING_ENABLE); + else + init_fw_cb->ipv6_opts &= + cpu_to_le16(~IPV6_OPT_VLAN_TAGGING_ENABLE); + break; + case ISCSI_NET_PARAM_MTU: + init_fw_cb->eth_mtu_size = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_PORT: + /* Autocfg applies to even interface */ + if (iface_param->iface_num & 0x1) + break; + + init_fw_cb->ipv6_port = + cpu_to_le16(*(uint16_t *)iface_param->value); break; default: - return -ENOSYS; + ql4_printk(KERN_ERR, ha, "Unknown IPv6 param = %d\n", + iface_param->param); + break; } +} - return len; +static void qla4xxx_set_ipv4(struct 
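/*
 * [Editor's sketch -- not part of the patch] qla4xxx_set_ipv6() above
 * flips option bits inside firmware fields that stay little-endian in
 * memory, hence the cpu_to_le16() wrapped around every mask. Byte-wise C
 * showing the same read-modify-write without the kernel's le16 helpers:
 */
#include <stdint.h>

static void le16_set_flag(uint8_t f[2], uint16_t flag)
{
    uint16_t v = (uint16_t)(f[0] | (f[1] << 8));   /* le16_to_cpu */
    v |= flag;
    f[0] = v & 0xff;                               /* cpu_to_le16 */
    f[1] = v >> 8;
}

static void le16_clear_flag(uint8_t f[2], uint16_t flag)
{
    uint16_t v = (uint16_t)(f[0] | (f[1] << 8));
    v &= (uint16_t)~flag;
    f[0] = v & 0xff;
    f[1] = v >> 8;
}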
scsi_qla_host *ha, + struct iscsi_iface_param_info *iface_param, + struct addr_ctrl_blk *init_fw_cb) +{ + switch (iface_param->param) { + case ISCSI_NET_PARAM_IPV4_ADDR: + memcpy(init_fw_cb->ipv4_addr, iface_param->value, + sizeof(init_fw_cb->ipv4_addr)); + break; + case ISCSI_NET_PARAM_IPV4_SUBNET: + memcpy(init_fw_cb->ipv4_subnet, iface_param->value, + sizeof(init_fw_cb->ipv4_subnet)); + break; + case ISCSI_NET_PARAM_IPV4_GW: + memcpy(init_fw_cb->ipv4_gw_addr, iface_param->value, + sizeof(init_fw_cb->ipv4_gw_addr)); + break; + case ISCSI_NET_PARAM_IPV4_BOOTPROTO: + if (iface_param->value[0] == ISCSI_BOOTPROTO_DHCP) + init_fw_cb->ipv4_tcp_opts |= + cpu_to_le16(TCPOPT_DHCP_ENABLE); + else if (iface_param->value[0] == ISCSI_BOOTPROTO_STATIC) + init_fw_cb->ipv4_tcp_opts &= + cpu_to_le16(~TCPOPT_DHCP_ENABLE); + else + ql4_printk(KERN_ERR, ha, "Invalid IPv4 bootproto\n"); + break; + case ISCSI_NET_PARAM_IFACE_ENABLE: + if (iface_param->value[0] == ISCSI_IFACE_ENABLE) { + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_IPV4_PROTOCOL_ENABLE); + qla4xxx_create_ipv4_iface(ha); + } else { + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_IPV4_PROTOCOL_ENABLE & + 0xFFFF); + qla4xxx_destroy_ipv4_iface(ha); + } + break; + case ISCSI_NET_PARAM_VLAN_TAG: + if (iface_param->len != sizeof(init_fw_cb->ipv4_vlan_tag)) + break; + init_fw_cb->ipv4_vlan_tag = + cpu_to_be16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_VLAN_ENABLED: + if (iface_param->value[0] == ISCSI_VLAN_ENABLE) + init_fw_cb->ipv4_ip_opts |= + cpu_to_le16(IPOPT_VLAN_TAGGING_ENABLE); + else + init_fw_cb->ipv4_ip_opts &= + cpu_to_le16(~IPOPT_VLAN_TAGGING_ENABLE); + break; + case ISCSI_NET_PARAM_MTU: + init_fw_cb->eth_mtu_size = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + case ISCSI_NET_PARAM_PORT: + init_fw_cb->ipv4_port = + cpu_to_le16(*(uint16_t *)iface_param->value); + break; + default: + ql4_printk(KERN_ERR, ha, "Unknown IPv4 param = %d\n", + iface_param->param); + break; + } } -static int qla4xxx_conn_get_param(struct iscsi_cls_conn *conn, +static void +qla4xxx_initcb_to_acb(struct addr_ctrl_blk *init_fw_cb) +{ + struct addr_ctrl_blk_def *acb; + acb = (struct addr_ctrl_blk_def *)init_fw_cb; + memset(acb->reserved1, 0, sizeof(acb->reserved1)); + memset(acb->reserved2, 0, sizeof(acb->reserved2)); + memset(acb->reserved3, 0, sizeof(acb->reserved3)); + memset(acb->reserved4, 0, sizeof(acb->reserved4)); + memset(acb->reserved5, 0, sizeof(acb->reserved5)); + memset(acb->reserved6, 0, sizeof(acb->reserved6)); + memset(acb->reserved7, 0, sizeof(acb->reserved7)); + memset(acb->reserved8, 0, sizeof(acb->reserved8)); + memset(acb->reserved9, 0, sizeof(acb->reserved9)); + memset(acb->reserved10, 0, sizeof(acb->reserved10)); + memset(acb->reserved11, 0, sizeof(acb->reserved11)); + memset(acb->reserved12, 0, sizeof(acb->reserved12)); + memset(acb->reserved13, 0, sizeof(acb->reserved13)); + memset(acb->reserved14, 0, sizeof(acb->reserved14)); + memset(acb->reserved15, 0, sizeof(acb->reserved15)); +} + +static int +qla4xxx_iface_set_param(struct Scsi_Host *shost, void *data, uint32_t len) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + int rval = 0; + struct iscsi_iface_param_info *iface_param = NULL; + struct addr_ctrl_blk *init_fw_cb = NULL; + dma_addr_t init_fw_cb_dma; + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + uint32_t rem = len; + struct nlattr *attr; + + init_fw_cb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk), + &init_fw_cb_dma, GFP_KERNEL); + if 
(!init_fw_cb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc init_cb\n", + __func__); + return -ENOMEM; + } + + memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); + memset(&mbox_cmd, 0, sizeof(mbox_cmd)); + memset(&mbox_sts, 0, sizeof(mbox_sts)); + + if (qla4xxx_get_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)) { + ql4_printk(KERN_ERR, ha, "%s: get ifcb failed\n", __func__); + rval = -EIO; + goto exit_init_fw_cb; + } + + nla_for_each_attr(attr, data, len, rem) { + iface_param = nla_data(attr); + + if (iface_param->param_type != ISCSI_NET_PARAM) + continue; + + switch (iface_param->iface_type) { + case ISCSI_IFACE_TYPE_IPV4: + switch (iface_param->iface_num) { + case 0: + qla4xxx_set_ipv4(ha, iface_param, init_fw_cb); + break; + default: + /* Cannot have more than one IPv4 interface */ + ql4_printk(KERN_ERR, ha, "Invalid IPv4 iface " + "number = %d\n", + iface_param->iface_num); + break; + } + break; + case ISCSI_IFACE_TYPE_IPV6: + switch (iface_param->iface_num) { + case 0: + case 1: + qla4xxx_set_ipv6(ha, iface_param, init_fw_cb); + break; + default: + /* Cannot have more than two IPv6 interface */ + ql4_printk(KERN_ERR, ha, "Invalid IPv6 iface " + "number = %d\n", + iface_param->iface_num); + break; + } + break; + default: + ql4_printk(KERN_ERR, ha, "Invalid iface type\n"); + break; + } + } + + init_fw_cb->cookie = cpu_to_le32(0x11BEAD5A); + + rval = qla4xxx_set_flash(ha, init_fw_cb_dma, FLASH_SEGMENT_IFCB, + sizeof(struct addr_ctrl_blk), + FLASH_OPT_RMW_COMMIT); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "%s: set flash mbx failed\n", + __func__); + rval = -EIO; + goto exit_init_fw_cb; + } + + qla4xxx_disable_acb(ha); + + qla4xxx_initcb_to_acb(init_fw_cb); + + rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma); + if (rval != QLA_SUCCESS) { + ql4_printk(KERN_ERR, ha, "%s: set acb mbx failed\n", + __func__); + rval = -EIO; + goto exit_init_fw_cb; + } + + memset(init_fw_cb, 0, sizeof(struct addr_ctrl_blk)); + qla4xxx_update_local_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb, + init_fw_cb_dma); + +exit_init_fw_cb: + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk), + init_fw_cb, init_fw_cb_dma); + + return rval; +} + +static int qla4xxx_conn_get_param(struct iscsi_cls_conn *cls_conn, enum iscsi_param param, char *buf) { - struct iscsi_cls_session *session; - struct ddb_entry *ddb_entry; - int len; + struct iscsi_conn *conn; + struct qla_conn *qla_conn; + struct sockaddr *dst_addr; + int len = 0; - session = iscsi_dev_to_session(conn->dev.parent); - ddb_entry = session->dd_data; + conn = cls_conn->dd_data; + qla_conn = conn->dd_data; + dst_addr = &qla_conn->qla_ep->dst_addr; switch (param) { case ISCSI_PARAM_CONN_PORT: - len = sprintf(buf, "%hu\n", ddb_entry->port); - break; case ISCSI_PARAM_CONN_ADDRESS: - /* TODO: what are the ipv6 bits */ - len = sprintf(buf, "%pI4\n", &ddb_entry->ip_addr); - break; + return iscsi_conn_get_addr_param((struct sockaddr_storage *) + dst_addr, param, buf); default: - return -ENOSYS; + return iscsi_conn_get_param(cls_conn, param, buf); } return len; + } -static int qla4xxx_tgt_dscvr(struct Scsi_Host *shost, - enum iscsi_tgt_dscvr type, uint32_t enable, - struct sockaddr *dst_addr) +static struct iscsi_cls_session * +qla4xxx_session_create(struct iscsi_endpoint *ep, + uint16_t cmds_max, uint16_t qdepth, + uint32_t initial_cmdsn) { + struct iscsi_cls_session *cls_sess; struct scsi_qla_host *ha; - struct sockaddr_in *addr; - struct sockaddr_in6 *addr6; + struct qla_endpoint *qla_ep; + struct ddb_entry 
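/*
 * [Editor's sketch -- not part of the patch] qla4xxx_iface_set_param()
 * above walks a netlink attribute stream with nla_for_each_attr() and
 * applies one iface_param per attribute. A simplified TLV walk with the
 * same shape; real netlink attributes are padded to 4-byte alignment,
 * which this sketch ignores:
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct tlv { uint16_t len; uint16_t type; /* payload follows */ };

static void walk_attrs(const uint8_t *data, size_t len,
                       void (*apply)(uint16_t type, const void *val,
                                     size_t vlen))
{
    size_t off = 0;

    while (len - off >= sizeof(struct tlv)) {
        struct tlv a;

        memcpy(&a, data + off, sizeof(a));   /* header may be unaligned */
        if (a.len < sizeof(a) || a.len > len - off)
            break;                           /* malformed: stop walking */
        apply(a.type, data + off + sizeof(a), a.len - sizeof(a));
        off += a.len;
    }
}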
*ddb_entry; + uint32_t ddb_index; + uint32_t mbx_sts = 0; + struct iscsi_session *sess; + struct sockaddr *dst_addr; + int ret; + + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + if (!ep) { + printk(KERN_ERR "qla4xxx: missing ep.\n"); + return NULL; + } + + qla_ep = ep->dd_data; + dst_addr = (struct sockaddr *)&qla_ep->dst_addr; + ha = to_qla_host(qla_ep->host); + +get_ddb_index: + ddb_index = find_first_zero_bit(ha->ddb_idx_map, MAX_DDB_ENTRIES); + + if (ddb_index >= MAX_DDB_ENTRIES) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Free DDB index not available\n")); + return NULL; + } + + if (test_and_set_bit(ddb_index, ha->ddb_idx_map)) + goto get_ddb_index; + + DEBUG2(ql4_printk(KERN_INFO, ha, + "Found a free DDB index at %d\n", ddb_index)); + ret = qla4xxx_req_ddb_entry(ha, ddb_index, &mbx_sts); + if (ret == QLA_ERROR) { + if (mbx_sts == MBOX_STS_COMMAND_ERROR) { + ql4_printk(KERN_INFO, ha, + "DDB index = %d not available trying next\n", + ddb_index); + goto get_ddb_index; + } + DEBUG2(ql4_printk(KERN_INFO, ha, + "Free FW DDB not available\n")); + return NULL; + } + + cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, qla_ep->host, + cmds_max, sizeof(struct ddb_entry), + sizeof(struct ql4_task_data), + initial_cmdsn, ddb_index); + if (!cls_sess) + return NULL; + + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ddb_entry->fw_ddb_index = ddb_index; + ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE; + ddb_entry->ha = ha; + ddb_entry->sess = cls_sess; + cls_sess->recovery_tmo = ql4xsess_recovery_tmo; + ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; + ha->tot_ddbs++; + + return cls_sess; +} + +static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + unsigned long flags; + + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); + + spin_lock_irqsave(&ha->hardware_lock, flags); + qla4xxx_free_ddb(ha, ddb_entry); + spin_unlock_irqrestore(&ha->hardware_lock, flags); + iscsi_session_teardown(cls_sess); +} + +static struct iscsi_cls_conn * +qla4xxx_conn_create(struct iscsi_cls_session *cls_sess, uint32_t conn_idx) +{ + struct iscsi_cls_conn *cls_conn; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), + conn_idx); + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ddb_entry->conn = cls_conn; + + return cls_conn; +} + +static int qla4xxx_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_fd, int is_leading) +{ + struct iscsi_conn *conn; + struct qla_conn *qla_conn; + struct iscsi_endpoint *ep; + + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + + if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) + return -EINVAL; + ep = iscsi_lookup_endpoint(transport_fd); + conn = cls_conn->dd_data; + qla_conn = conn->dd_data; + qla_conn->qla_ep = ep->dd_data; + return 0; +} + +static int qla4xxx_conn_start(struct iscsi_cls_conn *cls_conn) +{ + struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + struct dev_db_entry *fw_ddb_entry; + dma_addr_t fw_ddb_entry_dma; + uint32_t mbx_sts = 0; int ret = 0; + int status = QLA_SUCCESS; - ha = 
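/*
 * [Editor's sketch -- not part of the patch] qla4xxx_session_create()
 * above claims a DDB index with find_first_zero_bit() and jumps back to
 * retry when test_and_set_bit() reports a race. The same claim loop over
 * a single 64-bit word (the driver's map spans MAX_DDB_ENTRIES bits;
 * __builtin_ctzll is a GCC/Clang intrinsic):
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t ddb_map;        /* bit i set => index i in use */

static int claim_ddb_index(void)
{
    for (;;) {
        uint64_t m = atomic_load(&ddb_map);

        if (m == UINT64_MAX)
            return -1;                  /* no free index available */

        int idx = __builtin_ctzll(~m);  /* first zero bit */
        uint64_t bit = 1ull << idx;

        if (!(atomic_fetch_or(&ddb_map, bit) & bit))
            return idx;                 /* we set the bit first */
        /* lost the race: reload the map and retry */
    }
}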
(struct scsi_qla_host *) shost->hostdata; + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; - switch (type) { - case ISCSI_TGT_DSCVR_SEND_TARGETS: - if (dst_addr->sa_family == AF_INET) { - addr = (struct sockaddr_in *)dst_addr; - if (qla4xxx_send_tgts(ha, (char *)&addr->sin_addr, - addr->sin_port) != QLA_SUCCESS) - ret = -EIO; - } else if (dst_addr->sa_family == AF_INET6) { - /* - * TODO: fix qla4xxx_send_tgts - */ - addr6 = (struct sockaddr_in6 *)dst_addr; - if (qla4xxx_send_tgts(ha, (char *)&addr6->sin6_addr, - addr6->sin6_port) != QLA_SUCCESS) - ret = -EIO; - } else - ret = -ENOSYS; - break; - default: - ret = -ENOSYS; + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + return -ENOMEM; + } + + ret = qla4xxx_set_param_ddbentry(ha, ddb_entry, cls_conn, &mbx_sts); + if (ret) { + /* If iscsid is stopped and started then no need to do + * set param again since ddb state will be already + * active and FW does not allow set ddb to an + * active session. + */ + if (mbx_sts) + if (ddb_entry->fw_ddb_device_state == + DDB_DS_SESSION_ACTIVE) { + iscsi_conn_start(ddb_entry->conn); + iscsi_conn_login_event(ddb_entry->conn, + ISCSI_CONN_STATE_LOGGED_IN); + goto exit_set_param; + } + + ql4_printk(KERN_ERR, ha, "%s: Failed set param for index[%d]\n", + __func__, ddb_entry->fw_ddb_index); + goto exit_conn_start; + } + + status = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); + if (status == QLA_ERROR) { + ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, + sess->targetname); + ret = -EINVAL; + goto exit_conn_start; } + + if (ddb_entry->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) + ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; + + DEBUG2(printk(KERN_INFO "%s: DDB state [%d]\n", __func__, + ddb_entry->fw_ddb_device_state)); + +exit_set_param: + ret = 0; + +exit_conn_start: + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); return ret; } -void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry) +static void qla4xxx_conn_destroy(struct iscsi_cls_conn *cls_conn) { - if (!ddb_entry->sess) - return; + struct iscsi_cls_session *cls_sess = iscsi_conn_to_session(cls_conn); + struct iscsi_session *sess; + struct scsi_qla_host *ha; + struct ddb_entry *ddb_entry; + int options; - if (ddb_entry->conn) { - atomic_set(&ddb_entry->state, DDB_STATE_DEAD); - iscsi_remove_session(ddb_entry->sess); + DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); + sess = cls_sess->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + + options = LOGOUT_OPTION_CLOSE_SESSION; + if (qla4xxx_session_logout_ddb(ha, ddb_entry, options) == QLA_ERROR) + ql4_printk(KERN_ERR, ha, "%s: Logout failed\n", __func__); +} + +static void qla4xxx_task_work(struct work_struct *wdata) +{ + struct ql4_task_data *task_data; + struct scsi_qla_host *ha; + struct passthru_status *sts; + struct iscsi_task *task; + struct iscsi_hdr *hdr; + uint8_t *data; + uint32_t data_len; + struct iscsi_conn *conn; + int hdr_len; + itt_t itt; + + task_data = container_of(wdata, struct ql4_task_data, task_work); + ha = task_data->ha; + task = task_data->task; + sts = &task_data->sts; + hdr_len = sizeof(struct iscsi_hdr); + + DEBUG3(printk(KERN_INFO "Status returned\n")); + DEBUG3(qla4xxx_dump_buffer(sts, 64)); + DEBUG3(printk(KERN_INFO "Response buffer")); + 
DEBUG3(qla4xxx_dump_buffer(task_data->resp_buffer, 64)); + + conn = task->conn; + + switch (sts->completionStatus) { + case PASSTHRU_STATUS_COMPLETE: + hdr = (struct iscsi_hdr *)task_data->resp_buffer; + /* Assign back the itt in hdr, until we use the PREASSIGN_TAG */ + itt = sts->handle; + hdr->itt = itt; + data = task_data->resp_buffer + hdr_len; + data_len = task_data->resp_len - hdr_len; + iscsi_complete_pdu(conn, hdr, data, data_len); + break; + default: + ql4_printk(KERN_ERR, ha, "Passthru failed status = 0x%x\n", + sts->completionStatus); + break; } - iscsi_free_session(ddb_entry->sess); + return; } -int qla4xxx_add_sess(struct ddb_entry *ddb_entry) +static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) { - int err; + struct ql4_task_data *task_data; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; + int hdr_len; - ddb_entry->sess->recovery_tmo = ql4xsess_recovery_tmo; + sess = task->conn->session; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + task_data = task->dd_data; + memset(task_data, 0, sizeof(struct ql4_task_data)); - err = iscsi_add_session(ddb_entry->sess, ddb_entry->fw_ddb_index); - if (err) { - DEBUG2(printk(KERN_ERR "Could not add session.\n")); - return err; + if (task->sc) { + ql4_printk(KERN_INFO, ha, + "%s: SCSI Commands not implemented\n", __func__); + return -EINVAL; } - ddb_entry->conn = iscsi_create_conn(ddb_entry->sess, 0, 0); - if (!ddb_entry->conn) { - iscsi_remove_session(ddb_entry->sess); - DEBUG2(printk(KERN_ERR "Could not add connection.\n")); - return -ENOMEM; + hdr_len = sizeof(struct iscsi_hdr); + task_data->ha = ha; + task_data->task = task; + + if (task->data_count) { + task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, + task->data_count, + PCI_DMA_TODEVICE); } - /* finally ready to go */ - iscsi_unblock_session(ddb_entry->sess); + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", + __func__, task->conn->max_recv_dlength, hdr_len)); + + task_data->resp_len = task->conn->max_recv_dlength + hdr_len; + task_data->resp_buffer = dma_alloc_coherent(&ha->pdev->dev, + task_data->resp_len, + &task_data->resp_dma, + GFP_ATOMIC); + if (!task_data->resp_buffer) + goto exit_alloc_pdu; + + task_data->req_len = task->data_count + hdr_len; + task_data->req_buffer = dma_alloc_coherent(&ha->pdev->dev, + task_data->req_len, + &task_data->req_dma, + GFP_ATOMIC); + if (!task_data->req_buffer) + goto exit_alloc_pdu; + + task->hdr = task_data->req_buffer; + + INIT_WORK(&task_data->task_work, qla4xxx_task_work); + return 0; + +exit_alloc_pdu: + if (task_data->resp_buffer) + dma_free_coherent(&ha->pdev->dev, task_data->resp_len, + task_data->resp_buffer, task_data->resp_dma); + + if (task_data->req_buffer) + dma_free_coherent(&ha->pdev->dev, task_data->req_len, + task_data->req_buffer, task_data->req_dma); + return -ENOMEM; } -struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha) +static void qla4xxx_task_cleanup(struct iscsi_task *task) { + struct ql4_task_data *task_data; + struct iscsi_session *sess; struct ddb_entry *ddb_entry; - struct iscsi_cls_session *sess; - - sess = iscsi_alloc_session(ha->host, &qla4xxx_iscsi_transport, - sizeof(struct ddb_entry)); - if (!sess) - return NULL; + struct scsi_qla_host *ha; + int hdr_len; + hdr_len = sizeof(struct iscsi_hdr); + sess = task->conn->session; ddb_entry = sess->dd_data; - memset(ddb_entry, 0, sizeof(*ddb_entry)); - ddb_entry->ha = ha; - ddb_entry->sess = sess; - return ddb_entry; + ha = ddb_entry->ha; + task_data = 
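/*
 * [Editor's sketch -- not part of the patch] qla4xxx_alloc_pdu() above
 * allocates a response and a request buffer and unwinds both through a
 * single exit label on failure. The same single-unwind shape in plain C
 * (malloc/free stand in for dma_alloc_coherent/dma_free_coherent):
 */
#include <stdlib.h>

struct pdu_bufs { void *resp, *req; };

static int alloc_pdu_bufs(struct pdu_bufs *b, size_t resp_len,
                          size_t req_len)
{
    b->resp = b->req = NULL;            /* so the unwind is always safe */

    b->resp = malloc(resp_len);
    if (!b->resp)
        goto exit_alloc;
    b->req = malloc(req_len);
    if (!b->req)
        goto exit_alloc;
    return 0;

exit_alloc:
    free(b->resp);                      /* free(NULL) is a no-op, like */
    free(b->req);                       /* the guarded frees above */
    b->resp = b->req = NULL;
    return -1;
}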
task->dd_data; + + if (task->data_count) { + dma_unmap_single(&ha->pdev->dev, task_data->data_dma, + task->data_count, PCI_DMA_TODEVICE); + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", + __func__, task->conn->max_recv_dlength, hdr_len)); + + dma_free_coherent(&ha->pdev->dev, task_data->resp_len, + task_data->resp_buffer, task_data->resp_dma); + dma_free_coherent(&ha->pdev->dev, task_data->req_len, + task_data->req_buffer, task_data->req_dma); + return; } -static void qla4xxx_scan_start(struct Scsi_Host *shost) +static int qla4xxx_task_xmit(struct iscsi_task *task) { - struct scsi_qla_host *ha = shost_priv(shost); - struct ddb_entry *ddb_entry, *ddbtemp; + struct scsi_cmnd *sc = task->sc; + struct iscsi_session *sess = task->conn->session; + struct ddb_entry *ddb_entry = sess->dd_data; + struct scsi_qla_host *ha = ddb_entry->ha; + + if (!sc) + return qla4xxx_send_passthru0(task); - /* finish setup of sessions that were already setup in firmware */ - list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) { - if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) - qla4xxx_add_sess(ddb_entry); + ql4_printk(KERN_INFO, ha, "%s: scsi cmd xmit not implemented\n", + __func__); + return -ENOSYS; +} + +void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha, + struct ddb_entry *ddb_entry) +{ + struct iscsi_cls_session *cls_sess; + struct iscsi_cls_conn *cls_conn; + struct iscsi_session *sess; + struct iscsi_conn *conn; + uint32_t ddb_state; + dma_addr_t fw_ddb_entry_dma; + struct dev_db_entry *fw_ddb_entry; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", __func__); + return; } + + if (qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_entry, + fw_ddb_entry_dma, NULL, NULL, &ddb_state, + NULL, NULL, NULL) == QLA_ERROR) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: failed " + "get_ddb_entry for fw_ddb_index %d\n", + ha->host_no, __func__, + ddb_entry->fw_ddb_index)); + return; + } + + cls_sess = ddb_entry->sess; + sess = cls_sess->dd_data; + + cls_conn = ddb_entry->conn; + conn = cls_conn->dd_data; + + /* Update params */ + conn->max_recv_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_rcv_data_seg_len); + + conn->max_xmit_dlength = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_snd_data_seg_len); + + sess->initial_r2t_en = + (BIT_10 & le16_to_cpu(fw_ddb_entry->iscsi_options)); + + sess->max_r2t = le16_to_cpu(fw_ddb_entry->iscsi_max_outsnd_r2t); + + sess->imm_data_en = (BIT_11 & le16_to_cpu(fw_ddb_entry->iscsi_options)); + + sess->first_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_first_burst_len); + + sess->max_burst = BYTE_UNITS * + le16_to_cpu(fw_ddb_entry->iscsi_max_burst_len); + + sess->time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); + + sess->time2retain = le16_to_cpu(fw_ddb_entry->iscsi_def_time2retain); + + sess->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); + + memcpy(sess->initiatorname, ha->name_string, + min(sizeof(ha->name_string), sizeof(sess->initiatorname))); } /* @@ -376,25 +1433,15 @@ static void qla4xxx_stop_timer(struct scsi_qla_host *ha) } /*** - * qla4xxx_mark_device_missing - mark a device as missing. - * @ha: Pointer to host adapter structure. 
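/*
 * [Editor's sketch -- not part of the patch]
 * qla4xxx_update_session_conn_param() above lifts the negotiated session
 * flags out of the firmware DDB's iscsi_options word after byte-swapping
 * it. The same bit tests in isolation ('options' already in host order):
 */
#include <stdint.h>
#include <stdbool.h>

#define BIT(n) (1u << (n))

struct neg_flags { bool initial_r2t_en, imm_data_en; };

static struct neg_flags decode_iscsi_options(uint16_t options)
{
    struct neg_flags f = {
        .initial_r2t_en = options & BIT(10),  /* BIT_10 in the hunk */
        .imm_data_en    = options & BIT(11),  /* BIT_11 in the hunk */
    };
    return f;
}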
+ * qla4xxx_mark_device_missing - blocks the session + * @cls_session: Pointer to the session to be blocked * @ddb_entry: Pointer to device database entry * * This routine marks a device missing and close connection. **/ -void qla4xxx_mark_device_missing(struct scsi_qla_host *ha, - struct ddb_entry *ddb_entry) +void qla4xxx_mark_device_missing(struct iscsi_cls_session *cls_session) { - if ((atomic_read(&ddb_entry->state) != DDB_STATE_DEAD)) { - atomic_set(&ddb_entry->state, DDB_STATE_MISSING); - DEBUG2(printk("scsi%ld: ddb [%d] marked MISSING\n", - ha->host_no, ddb_entry->fw_ddb_index)); - } else - DEBUG2(printk("scsi%ld: ddb [%d] DEAD\n", ha->host_no, - ddb_entry->fw_ddb_index)) - - iscsi_block_session(ddb_entry->sess); - iscsi_conn_error_event(ddb_entry->conn, ISCSI_ERR_CONN_FAILED); + iscsi_block_session(cls_session); } /** @@ -405,10 +1452,7 @@ void qla4xxx_mark_device_missing(struct scsi_qla_host *ha, **/ void qla4xxx_mark_all_devices_missing(struct scsi_qla_host *ha) { - struct ddb_entry *ddb_entry, *ddbtemp; - list_for_each_entry_safe(ddb_entry, ddbtemp, &ha->ddb_list, list) { - qla4xxx_mark_device_missing(ha, ddb_entry); - } + iscsi_host_for_each_session(ha->host, qla4xxx_mark_device_missing); } static struct srb* qla4xxx_get_new_srb(struct scsi_qla_host *ha, @@ -495,20 +1539,13 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) goto qc_fail_command; } - if (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { - if (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD) { - cmd->result = DID_NO_CONNECT << 16; - goto qc_fail_command; - } - return SCSI_MLQUEUE_TARGET_BUSY; - } - if (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags) || test_bit(DPC_RESET_ACTIVE, &ha->dpc_flags) || test_bit(DPC_RESET_HA, &ha->dpc_flags) || test_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags) || test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) || !test_bit(AF_ONLINE, &ha->flags) || + !test_bit(AF_LINK_UP, &ha->flags) || test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags)) goto qc_host_busy; @@ -563,6 +1600,13 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha) ha->srb_mempool = NULL; + if (ha->chap_dma_pool) + dma_pool_destroy(ha->chap_dma_pool); + + if (ha->chap_list) + vfree(ha->chap_list); + ha->chap_list = NULL; + /* release io space registers */ if (is_qla8022(ha)) { if (ha->nx_pcibase) @@ -636,6 +1680,15 @@ static int qla4xxx_mem_alloc(struct scsi_qla_host *ha) goto mem_alloc_error_exit; } + ha->chap_dma_pool = dma_pool_create("ql4_chap", &ha->pdev->dev, + CHAP_DMA_BLOCK_SIZE, 8, 0); + + if (ha->chap_dma_pool == NULL) { + ql4_printk(KERN_WARNING, ha, + "%s: chap_dma_pool allocation failed..\n", __func__); + goto mem_alloc_error_exit; + } + return QLA_SUCCESS; mem_alloc_error_exit: @@ -753,7 +1806,6 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha) **/ static void qla4xxx_timer(struct scsi_qla_host *ha) { - struct ddb_entry *ddb_entry, *dtemp; int start_dpc = 0; uint16_t w; @@ -773,69 +1825,6 @@ static void qla4xxx_timer(struct scsi_qla_host *ha) qla4_8xxx_watchdog(ha); } - /* Search for relogin's to time-out and port down retry. 
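/*
 * [Editor's sketch -- not part of the patch] With the driver-private DDB
 * list gone, qla4xxx_mark_all_devices_missing() above reaches every
 * session through iscsi_host_for_each_session(host, fn). The
 * callback-iterator shape in plain C:
 */
struct session { int blocked; };

static void for_each_session(struct session *s, int n,
                             void (*fn)(struct session *))
{
    for (int i = 0; i < n; i++)
        fn(&s[i]);      /* fn is e.g. the block-session callback */
}

static void block_session(struct session *s)
{
    s->blocked = 1;     /* stands in for iscsi_block_session() */
}

/* usage: for_each_session(sessions, nsessions, block_session); */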
*/ - list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { - /* Count down time between sending relogins */ - if (adapter_up(ha) && - !test_bit(DF_RELOGIN, &ddb_entry->flags) && - atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) { - if (atomic_read(&ddb_entry->retry_relogin_timer) != - INVALID_ENTRY) { - if (atomic_read(&ddb_entry->retry_relogin_timer) - == 0) { - atomic_set(&ddb_entry-> - retry_relogin_timer, - INVALID_ENTRY); - set_bit(DPC_RELOGIN_DEVICE, - &ha->dpc_flags); - set_bit(DF_RELOGIN, &ddb_entry->flags); - DEBUG2(printk("scsi%ld: %s: ddb [%d]" - " login device\n", - ha->host_no, __func__, - ddb_entry->fw_ddb_index)); - } else - atomic_dec(&ddb_entry-> - retry_relogin_timer); - } - } - - /* Wait for relogin to timeout */ - if (atomic_read(&ddb_entry->relogin_timer) && - (atomic_dec_and_test(&ddb_entry->relogin_timer) != 0)) { - /* - * If the relogin times out and the device is - * still NOT ONLINE then try and relogin again. - */ - if (atomic_read(&ddb_entry->state) != - DDB_STATE_ONLINE && - ddb_entry->fw_ddb_device_state == - DDB_DS_SESSION_FAILED) { - /* Reset retry relogin timer */ - atomic_inc(&ddb_entry->relogin_retry_count); - DEBUG2(printk("scsi%ld: ddb [%d] relogin" - " timed out-retrying" - " relogin (%d)\n", - ha->host_no, - ddb_entry->fw_ddb_index, - atomic_read(&ddb_entry-> - relogin_retry_count)) - ); - start_dpc++; - DEBUG(printk("scsi%ld:%d:%d: ddb [%d] " - "initiate relogin after" - " %d seconds\n", - ha->host_no, ddb_entry->bus, - ddb_entry->target, - ddb_entry->fw_ddb_index, - ddb_entry->default_time2wait + 4) - ); - - atomic_set(&ddb_entry->retry_relogin_timer, - ddb_entry->default_time2wait + 4); - } - } - } - if (!is_qla8022(ha)) { /* Check for heartbeat interval. */ if (ha->firmware_options & FWOPT_HEARTBEAT_ENABLE && @@ -1081,6 +2070,17 @@ void qla4xxx_dead_adapter_cleanup(struct scsi_qla_host *ha) clear_bit(AF_INIT_DONE, &ha->flags); } +static void qla4xxx_fail_session(struct iscsi_cls_session *cls_session) +{ + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + ddb_entry->fw_ddb_device_state = DDB_DS_SESSION_FAILED; + iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED); +} + /** * qla4xxx_recover_adapter - recovers adapter after a fatal error * @ha: Pointer to host adapter structure. 
@@ -1093,11 +2093,14 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha) /* Stall incoming I/O until we are done */ scsi_block_requests(ha->host); clear_bit(AF_ONLINE, &ha->flags); + clear_bit(AF_LINK_UP, &ha->flags); DEBUG2(ql4_printk(KERN_INFO, ha, "%s: adapter OFFLINE\n", __func__)); set_bit(DPC_RESET_ACTIVE, &ha->dpc_flags); + iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) reset_chip = 1; @@ -1160,7 +2163,7 @@ recover_ha_init_adapter: /* NOTE: AF_ONLINE flag set upon successful completion of * qla4xxx_initialize_adapter */ - status = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST); + status = qla4xxx_initialize_adapter(ha); } /* Retry failed adapter initialization, if necessary @@ -1225,27 +2228,34 @@ recover_ha_init_adapter: return status; } -static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) +static void qla4xxx_relogin_devices(struct iscsi_cls_session *cls_session) { - struct ddb_entry *ddb_entry, *dtemp; + struct iscsi_session *sess; + struct ddb_entry *ddb_entry; + struct scsi_qla_host *ha; - list_for_each_entry_safe(ddb_entry, dtemp, &ha->ddb_list, list) { - if ((atomic_read(&ddb_entry->state) == DDB_STATE_MISSING) || - (atomic_read(&ddb_entry->state) == DDB_STATE_DEAD)) { - if (ddb_entry->fw_ddb_device_state == - DDB_DS_SESSION_ACTIVE) { - atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); - ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" - " marked ONLINE\n", ha->host_no, __func__, - ddb_entry->fw_ddb_index); - - iscsi_unblock_session(ddb_entry->sess); - } else - qla4xxx_relogin_device(ha, ddb_entry); + sess = cls_session->dd_data; + ddb_entry = sess->dd_data; + ha = ddb_entry->ha; + if (!iscsi_is_session_online(cls_session)) { + if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { + ql4_printk(KERN_INFO, ha, "scsi%ld: %s: ddb[%d]" + " unblock session\n", ha->host_no, __func__, + ddb_entry->fw_ddb_index); + iscsi_unblock_session(ddb_entry->sess); + } else { + /* Trigger relogin */ + iscsi_session_failure(cls_session->dd_data, + ISCSI_ERR_CONN_FAILED); } } } +static void qla4xxx_relogin_all_devices(struct scsi_qla_host *ha) +{ + iscsi_host_for_each_session(ha->host, qla4xxx_relogin_devices); +} + void qla4xxx_wake_dpc(struct scsi_qla_host *ha) { if (ha->dpc_thread) @@ -1267,7 +2277,6 @@ static void qla4xxx_do_dpc(struct work_struct *work) { struct scsi_qla_host *ha = container_of(work, struct scsi_qla_host, dpc_work); - struct ddb_entry *ddb_entry, *dtemp; int status = QLA_ERROR; DEBUG2(printk("scsi%ld: %s: DPC handler waking up." @@ -1363,31 +2372,6 @@ dpc_post_reset_ha: qla4xxx_relogin_all_devices(ha); } } - - /* ---- relogin device? --- */ - if (adapter_up(ha) && - test_and_clear_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags)) { - list_for_each_entry_safe(ddb_entry, dtemp, - &ha->ddb_list, list) { - if (test_and_clear_bit(DF_RELOGIN, &ddb_entry->flags) && - atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE) - qla4xxx_relogin_device(ha, ddb_entry); - - /* - * If mbx cmd times out there is no point - * in continuing further. - * With large no of targets this can hang - * the system. 
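/*
 * [Editor's sketch -- not part of the patch] qla4xxx_relogin_devices()
 * above makes a per-session decision: an offline session whose firmware
 * DDB is already ACTIVE only needs unblocking, while anything else is
 * failed so that iscsid initiates the relogin. The decision table in
 * isolation:
 */
#include <stdbool.h>

enum fw_state { FW_SESSION_ACTIVE, FW_SESSION_FAILED };
enum action   { DO_NOTHING, DO_UNBLOCK, DO_FAIL_SESSION };

static enum action relogin_action(bool session_online, enum fw_state st)
{
    if (session_online)
        return DO_NOTHING;
    return st == FW_SESSION_ACTIVE ? DO_UNBLOCK : DO_FAIL_SESSION;
}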
- */ - if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) { - printk(KERN_WARNING "scsi%ld: %s: " - "need to reset hba\n", - ha->host_no, __func__); - break; - } - } - } - } /** @@ -1410,6 +2394,10 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha) if (ha->dpc_thread) destroy_workqueue(ha->dpc_thread); + /* Kill the kernel thread for this host */ + if (ha->task_wq) + destroy_workqueue(ha->task_wq); + /* Put firmware in known state */ ha->isp_ops->reset_firmware(ha); @@ -1601,6 +2589,594 @@ uint16_t qla4_8xxx_rd_shdw_rsp_q_in(struct scsi_qla_host *ha) return (uint16_t)le32_to_cpu(readl(&ha->qla4_8xxx_reg->rsp_q_in)); } +static ssize_t qla4xxx_show_boot_eth_info(void *data, int type, char *buf) +{ + struct scsi_qla_host *ha = data; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); + break; + case ISCSI_BOOT_ETH_INDEX: + rc = sprintf(str, "0\n"); + break; + case ISCSI_BOOT_ETH_MAC: + rc = sysfs_format_mac(str, ha->my_mac, + MAC_ADDR_LEN); + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + +static mode_t qla4xxx_eth_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + case ISCSI_BOOT_ETH_MAC: + case ISCSI_BOOT_ETH_INDEX: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + +static ssize_t qla4xxx_show_boot_ini_info(void *data, int type, char *buf) +{ + struct scsi_qla_host *ha = data; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = sprintf(str, "%s\n", ha->name_string); + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + +static mode_t qla4xxx_ini_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + +static ssize_t +qla4xxx_show_boot_tgt_info(struct ql4_boot_session_info *boot_sess, int type, + char *buf) +{ + struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; + char *str = buf; + int rc; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + rc = sprintf(buf, "%s\n", (char *)&boot_sess->target_name); + break; + case ISCSI_BOOT_TGT_IP_ADDR: + if (boot_sess->conn_list[0].dest_ipaddr.ip_type == 0x1) + rc = sprintf(buf, "%pI4\n", + &boot_conn->dest_ipaddr.ip_address); + else + rc = sprintf(str, "%pI6\n", + &boot_conn->dest_ipaddr.ip_address); + break; + case ISCSI_BOOT_TGT_PORT: + rc = sprintf(str, "%d\n", boot_conn->dest_port); + break; + case ISCSI_BOOT_TGT_CHAP_NAME: + rc = sprintf(str, "%.*s\n", + boot_conn->chap.target_chap_name_length, + (char *)&boot_conn->chap.target_chap_name); + break; + case ISCSI_BOOT_TGT_CHAP_SECRET: + rc = sprintf(str, "%.*s\n", + boot_conn->chap.target_secret_length, + (char *)&boot_conn->chap.target_secret); + break; + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + rc = sprintf(str, "%.*s\n", + boot_conn->chap.intr_chap_name_length, + (char *)&boot_conn->chap.intr_chap_name); + break; + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + rc = sprintf(str, "%.*s\n", + boot_conn->chap.intr_secret_length, + (char *)&boot_conn->chap.intr_secret); + break; + case ISCSI_BOOT_TGT_FLAGS: + rc = sprintf(str, "%d\n", SYSFS_FLAG_FW_SEL_BOOT); + break; + case ISCSI_BOOT_TGT_NIC_ASSOC: + rc = sprintf(str, "0\n"); + break; + default: + rc = -ENOSYS; + break; + } + return rc; +} + +static ssize_t qla4xxx_show_boot_tgt_pri_info(void *data, int type, char *buf) +{ + struct scsi_qla_host *ha = data; + struct ql4_boot_session_info 
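/*
 * [Editor's sketch -- not part of the patch] The CHAP name/secret fields
 * printed by qla4xxx_show_boot_tgt_info() above are length-prefixed
 * firmware strings with no NUL terminator, which is why each one goes
 * through "%.*s": the precision caps how many bytes printf may read.
 */
#include <stdio.h>

static int show_fw_string(char *page, int len, const char *fw_buf)
{
    /* never reads past fw_buf[len - 1], terminator or not */
    return sprintf(page, "%.*s\n", len, fw_buf);
}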
*boot_sess = &(ha->boot_tgt.boot_pri_sess); + + return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); +} + +static ssize_t qla4xxx_show_boot_tgt_sec_info(void *data, int type, char *buf) +{ + struct scsi_qla_host *ha = data; + struct ql4_boot_session_info *boot_sess = &(ha->boot_tgt.boot_sec_sess); + + return qla4xxx_show_boot_tgt_info(boot_sess, type, buf); +} + +static mode_t qla4xxx_tgt_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + case ISCSI_BOOT_TGT_IP_ADDR: + case ISCSI_BOOT_TGT_PORT: + case ISCSI_BOOT_TGT_CHAP_NAME: + case ISCSI_BOOT_TGT_CHAP_SECRET: + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + case ISCSI_BOOT_TGT_NIC_ASSOC: + case ISCSI_BOOT_TGT_FLAGS: + rc = S_IRUGO; + break; + default: + rc = 0; + break; + } + return rc; +} + +static void qla4xxx_boot_release(void *data) +{ + struct scsi_qla_host *ha = data; + + scsi_host_put(ha->host); +} + +static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[]) +{ + dma_addr_t buf_dma; + uint32_t addr, pri_addr, sec_addr; + uint32_t offset; + uint16_t func_num; + uint8_t val; + uint8_t *buf = NULL; + size_t size = 13 * sizeof(uint8_t); + int ret = QLA_SUCCESS; + + func_num = PCI_FUNC(ha->pdev->devfn); + + ql4_printk(KERN_INFO, ha, "%s: Get FW boot info for 0x%x func %d\n", + __func__, ha->pdev->device, func_num); + + if (is_qla40XX(ha)) { + if (func_num == 1) { + addr = NVRAM_PORT0_BOOT_MODE; + pri_addr = NVRAM_PORT0_BOOT_PRI_TGT; + sec_addr = NVRAM_PORT0_BOOT_SEC_TGT; + } else if (func_num == 3) { + addr = NVRAM_PORT1_BOOT_MODE; + pri_addr = NVRAM_PORT1_BOOT_PRI_TGT; + sec_addr = NVRAM_PORT1_BOOT_SEC_TGT; + } else { + ret = QLA_ERROR; + goto exit_boot_info; + } + + /* Check Boot Mode */ + val = rd_nvram_byte(ha, addr); + if (!(val & 0x07)) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Failed Boot options : 0x%x\n", + __func__, val)); + ret = QLA_ERROR; + goto exit_boot_info; + } + + /* get primary valid target index */ + val = rd_nvram_byte(ha, pri_addr); + if (val & BIT_7) + ddb_index[0] = (val & 0x7f); + + /* get secondary valid target index */ + val = rd_nvram_byte(ha, sec_addr); + if (val & BIT_7) + ddb_index[1] = (val & 0x7f); + + } else if (is_qla8022(ha)) { + buf = dma_alloc_coherent(&ha->pdev->dev, size, + &buf_dma, GFP_KERNEL); + if (!buf) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer\n", + __func__)); + ret = QLA_ERROR; + goto exit_boot_info; + } + + if (ha->port_num == 0) + offset = BOOT_PARAM_OFFSET_PORT0; + else if (ha->port_num == 1) + offset = BOOT_PARAM_OFFSET_PORT1; + else { + ret = QLA_ERROR; + goto exit_boot_info_free; + } + addr = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_iscsi_param * 4) + + offset; + if (qla4xxx_get_flash(ha, buf_dma, addr, + 13 * sizeof(uint8_t)) != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "scsi%ld: %s: Get Flash" + "failed\n", ha->host_no, __func__)); + ret = QLA_ERROR; + goto exit_boot_info_free; + } + /* Check Boot Mode */ + if (!(buf[1] & 0x07)) { + DEBUG2(ql4_printk(KERN_INFO, ha, + "Failed: Boot options : 0x%x\n", + buf[1])); + ret = QLA_ERROR; + goto exit_boot_info_free; + } + + /* get primary valid target index */ + if (buf[2] & BIT_7) + ddb_index[0] = buf[2] & 0x7f; + + /* get secondary valid target index */ + if (buf[11] & BIT_7) + ddb_index[1] = buf[11] & 0x7f; + } else { + ret = QLA_ERROR; + goto exit_boot_info; + } + + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Primary target ID %d, Secondary" + " target ID %d\n", __func__, ddb_index[0], + ddb_index[1])); + 
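/*
 * [Editor's sketch -- not part of the patch] get_fw_boot_info() above
 * decodes one NVRAM/flash byte per boot target: bit 7 flags the entry
 * valid and the low seven bits carry the DDB index. In isolation:
 */
#include <stdint.h>

static int decode_boot_target(uint8_t val)
{
    if (!(val & 0x80))          /* BIT_7 clear: no valid target */
        return -1;
    return val & 0x7f;          /* target index, 0..127 */
}

/* e.g. a valid primary entry:
 * ddb_index[0] = decode_boot_target(rd_nvram_byte(ha, pri_addr)); */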
+exit_boot_info_free: + dma_free_coherent(&ha->pdev->dev, size, buf, buf_dma); +exit_boot_info: + return ret; +} + +/** + * qla4xxx_get_bidi_chap - Get a BIDI CHAP user and password + * @ha: pointer to adapter structure + * @username: CHAP username to be returned + * @password: CHAP password to be returned + * + * If a boot entry has BIDI CHAP enabled then we need to set the BIDI CHAP + * user and password in the sysfs entry in /sys/firmware/iscsi_boot#/. + * So from the CHAP cache find the first BIDI CHAP entry and set it + * to the boot record in sysfs. + **/ +static int qla4xxx_get_bidi_chap(struct scsi_qla_host *ha, char *username, + char *password) +{ + int i, ret = -EINVAL; + int max_chap_entries = 0; + struct ql4_chap_table *chap_table; + + if (is_qla8022(ha)) + max_chap_entries = (ha->hw.flt_chap_size / 2) / + sizeof(struct ql4_chap_table); + else + max_chap_entries = MAX_CHAP_ENTRIES_40XX; + + if (!ha->chap_list) { + ql4_printk(KERN_ERR, ha, "Do not have CHAP table cache\n"); + return ret; + } + + mutex_lock(&ha->chap_sem); + for (i = 0; i < max_chap_entries; i++) { + chap_table = (struct ql4_chap_table *)ha->chap_list + i; + if (chap_table->cookie != + __constant_cpu_to_le16(CHAP_VALID_COOKIE)) { + continue; + } + + if (chap_table->flags & BIT_7) /* local */ + continue; + + if (!(chap_table->flags & BIT_6)) /* Not BIDI */ + continue; + + strncpy(password, chap_table->secret, QL4_CHAP_MAX_SECRET_LEN); + strncpy(username, chap_table->name, QL4_CHAP_MAX_NAME_LEN); + ret = 0; + break; + } + mutex_unlock(&ha->chap_sem); + + return ret; +} + + +static int qla4xxx_get_boot_target(struct scsi_qla_host *ha, + struct ql4_boot_session_info *boot_sess, + uint16_t ddb_index) +{ + struct ql4_conn_info *boot_conn = &boot_sess->conn_list[0]; + struct dev_db_entry *fw_ddb_entry; + dma_addr_t fw_ddb_entry_dma; + uint16_t idx; + uint16_t options; + int ret = QLA_SUCCESS; + + fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + &fw_ddb_entry_dma, GFP_KERNEL); + if (!fw_ddb_entry) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Unable to allocate dma buffer.\n", + __func__)); + ret = QLA_ERROR; + return ret; + } + + if (qla4xxx_bootdb_by_index(ha, fw_ddb_entry, + fw_ddb_entry_dma, ddb_index)) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Flash DDB read Failed\n", __func__)); + ret = QLA_ERROR; + goto exit_boot_target; + } + + /* Update target name and IP from DDB */ + memcpy(boot_sess->target_name, fw_ddb_entry->iscsi_name, + min(sizeof(boot_sess->target_name), + sizeof(fw_ddb_entry->iscsi_name))); + + options = le16_to_cpu(fw_ddb_entry->options); + if (options & DDB_OPT_IPV6_DEVICE) { + memcpy(&boot_conn->dest_ipaddr.ip_address, + &fw_ddb_entry->ip_addr[0], IPv6_ADDR_LEN); + } else { + boot_conn->dest_ipaddr.ip_type = 0x1; + memcpy(&boot_conn->dest_ipaddr.ip_address, + &fw_ddb_entry->ip_addr[0], IP_ADDR_LEN); + } + + boot_conn->dest_port = le16_to_cpu(fw_ddb_entry->port); + + /* update chap information */ + idx = __le16_to_cpu(fw_ddb_entry->chap_tbl_idx); + + if (BIT_7 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { + + DEBUG2(ql4_printk(KERN_INFO, ha, "Setting chap\n")); + + ret = qla4xxx_get_chap(ha, (char *)&boot_conn->chap. 
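/*
 * [Editor's sketch -- not part of the patch] qla4xxx_get_bidi_chap()
 * above scans the cached CHAP table for the first entry that is valid
 * (cookie matches), not a local entry (bit 7 clear) and BIDI-capable
 * (bit 6 set). The scan in isolation; the cookie value and field sizes
 * below are placeholders, not the driver's real layout:
 */
#include <stdint.h>
#include <stddef.h>

#define VALID_COOKIE 0x4092u            /* placeholder value */

struct chap_entry {
    uint16_t cookie;                    /* host order in this sketch */
    uint8_t  flags;                     /* bit 7: local, bit 6: BIDI */
    char     name[256];
    char     secret[100];
};

static const struct chap_entry *find_bidi_chap(const struct chap_entry *t,
                                               size_t n)
{
    for (size_t i = 0; i < n; i++) {
        if (t[i].cookie != VALID_COOKIE)
            continue;                   /* unused/stale slot */
        if (t[i].flags & 0x80)
            continue;                   /* local entry: skip */
        if (!(t[i].flags & 0x40))
            continue;                   /* not BIDI: skip */
        return &t[i];
    }
    return NULL;
}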
+ target_chap_name, + (char *)&boot_conn->chap.target_secret, + idx); + if (ret) { + ql4_printk(KERN_ERR, ha, "Failed to set chap\n"); + ret = QLA_ERROR; + goto exit_boot_target; + } + + boot_conn->chap.target_chap_name_length = QL4_CHAP_MAX_NAME_LEN; + boot_conn->chap.target_secret_length = QL4_CHAP_MAX_SECRET_LEN; + } + + if (BIT_4 & le16_to_cpu(fw_ddb_entry->iscsi_options)) { + + DEBUG2(ql4_printk(KERN_INFO, ha, "Setting BIDI chap\n")); + + ret = qla4xxx_get_bidi_chap(ha, + (char *)&boot_conn->chap.intr_chap_name, + (char *)&boot_conn->chap.intr_secret); + + if (ret) { + ql4_printk(KERN_ERR, ha, "Failed to set BIDI chap\n"); + ret = QLA_ERROR; + goto exit_boot_target; + } + + boot_conn->chap.intr_chap_name_length = QL4_CHAP_MAX_NAME_LEN; + boot_conn->chap.intr_secret_length = QL4_CHAP_MAX_SECRET_LEN; + } + +exit_boot_target: + dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), + fw_ddb_entry, fw_ddb_entry_dma); + return ret; +} + +static int qla4xxx_get_boot_info(struct scsi_qla_host *ha) +{ + uint16_t ddb_index[2]; + int ret = QLA_ERROR; + int rval; + + memset(ddb_index, 0, sizeof(ddb_index)); + ddb_index[0] = 0xffff; + ddb_index[1] = 0xffff; + ret = get_fw_boot_info(ha, ddb_index); + if (ret != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, + "%s: Failed to set boot info.\n", __func__)); + return ret; + } + + if (ddb_index[0] == 0xffff) + goto sec_target; + + rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_pri_sess), + ddb_index[0]); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get " + "primary target\n", __func__)); + } else + ret = QLA_SUCCESS; + +sec_target: + if (ddb_index[1] == 0xffff) + goto exit_get_boot_info; + + rval = qla4xxx_get_boot_target(ha, &(ha->boot_tgt.boot_sec_sess), + ddb_index[1]); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_ERR, ha, "%s: Failed to get " + "secondary target\n", __func__)); + } else + ret = QLA_SUCCESS; + +exit_get_boot_info: + return ret; +} + +static int qla4xxx_setup_boot_info(struct scsi_qla_host *ha) +{ + struct iscsi_boot_kobj *boot_kobj; + + if (qla4xxx_get_boot_info(ha) != QLA_SUCCESS) + return 0; + + ha->boot_kset = iscsi_boot_create_host_kset(ha->host->host_no); + if (!ha->boot_kset) + goto kset_free; + + if (!scsi_host_get(ha->host)) + goto kset_free; + boot_kobj = iscsi_boot_create_target(ha->boot_kset, 0, ha, + qla4xxx_show_boot_tgt_pri_info, + qla4xxx_tgt_get_attr_visibility, + qla4xxx_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(ha->host)) + goto kset_free; + boot_kobj = iscsi_boot_create_target(ha->boot_kset, 1, ha, + qla4xxx_show_boot_tgt_sec_info, + qla4xxx_tgt_get_attr_visibility, + qla4xxx_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(ha->host)) + goto kset_free; + boot_kobj = iscsi_boot_create_initiator(ha->boot_kset, 0, ha, + qla4xxx_show_boot_ini_info, + qla4xxx_ini_get_attr_visibility, + qla4xxx_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(ha->host)) + goto kset_free; + boot_kobj = iscsi_boot_create_ethernet(ha->boot_kset, 0, ha, + qla4xxx_show_boot_eth_info, + qla4xxx_eth_get_attr_visibility, + qla4xxx_boot_release); + if (!boot_kobj) + goto put_host; + + return 0; + +put_host: + scsi_host_put(ha->host); +kset_free: + iscsi_boot_destroy_kset(ha->boot_kset); + return -ENOMEM; +} + + +/** + * qla4xxx_create chap_list - Create CHAP list from FLASH + * @ha: pointer to adapter structure + * + * Read flash and make a list of CHAP entries, during login when a CHAP entry + * is received, it 
will be checked in this list. If entry exist then the CHAP + * entry index is set in the DDB. If CHAP entry does not exist in this list + * then a new entry is added in FLASH in CHAP table and the index obtained is + * used in the DDB. + **/ +static void qla4xxx_create_chap_list(struct scsi_qla_host *ha) +{ + int rval = 0; + uint8_t *chap_flash_data = NULL; + uint32_t offset; + dma_addr_t chap_dma; + uint32_t chap_size = 0; + + if (is_qla40XX(ha)) + chap_size = MAX_CHAP_ENTRIES_40XX * + sizeof(struct ql4_chap_table); + else /* Single region contains CHAP info for both + * ports which is divided into half for each port. + */ + chap_size = ha->hw.flt_chap_size / 2; + + chap_flash_data = dma_alloc_coherent(&ha->pdev->dev, chap_size, + &chap_dma, GFP_KERNEL); + if (!chap_flash_data) { + ql4_printk(KERN_ERR, ha, "No memory for chap_flash_data\n"); + return; + } + if (is_qla40XX(ha)) + offset = FLASH_CHAP_OFFSET; + else { + offset = FLASH_RAW_ACCESS_ADDR + (ha->hw.flt_region_chap << 2); + if (ha->port_num == 1) + offset += chap_size; + } + + rval = qla4xxx_get_flash(ha, chap_dma, offset, chap_size); + if (rval != QLA_SUCCESS) + goto exit_chap_list; + + if (ha->chap_list == NULL) + ha->chap_list = vmalloc(chap_size); + if (ha->chap_list == NULL) { + ql4_printk(KERN_ERR, ha, "No memory for ha->chap_list\n"); + goto exit_chap_list; + } + + memcpy(ha->chap_list, chap_flash_data, chap_size); + +exit_chap_list: + dma_free_coherent(&ha->pdev->dev, chap_size, + chap_flash_data, chap_dma); + return; +} + /** * qla4xxx_probe_adapter - callback function to probe HBA * @pdev: pointer to pci_dev structure @@ -1624,7 +3200,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, if (pci_enable_device(pdev)) return -1; - host = scsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha)); + host = iscsi_host_alloc(&qla4xxx_driver_template, sizeof(*ha), 0); if (host == NULL) { printk(KERN_WARNING "qla4xxx: Couldn't allocate host from scsi layer!\n"); @@ -1632,7 +3208,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, } /* Clear our data area */ - ha = (struct scsi_qla_host *) host->hostdata; + ha = to_qla_host(host); memset(ha, 0, sizeof(*ha)); /* Save the information from PCI BIOS. */ @@ -1675,11 +3251,12 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, qla4xxx_config_dma_addressing(ha); /* Initialize lists and spinlocks. 
*/ - INIT_LIST_HEAD(&ha->ddb_list); INIT_LIST_HEAD(&ha->free_srb_q); mutex_init(&ha->mbox_sem); + mutex_init(&ha->chap_sem); init_completion(&ha->mbx_intr_comp); + init_completion(&ha->disable_acb_comp); spin_lock_init(&ha->hardware_lock); @@ -1692,6 +3269,27 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, goto probe_failed; } + host->cmd_per_lun = 3; + host->max_channel = 0; + host->max_lun = MAX_LUNS - 1; + host->max_id = MAX_TARGETS; + host->max_cmd_len = IOCB_MAX_CDB_LEN; + host->can_queue = MAX_SRBS ; + host->transportt = qla4xxx_scsi_transport; + + ret = scsi_init_shared_tag_map(host, MAX_SRBS); + if (ret) { + ql4_printk(KERN_WARNING, ha, + "%s: scsi_init_shared_tag_map failed\n", __func__); + goto probe_failed; + } + + pci_set_drvdata(pdev, ha); + + ret = scsi_add_host(host, &pdev->dev); + if (ret) + goto probe_failed; + if (is_qla8022(ha)) (void) qla4_8xxx_get_flash_info(ha); @@ -1700,7 +3298,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, * firmware * NOTE: interrupts enabled upon successful completion */ - status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); + status = qla4xxx_initialize_adapter(ha); while ((!test_bit(AF_ONLINE, &ha->flags)) && init_retry_count++ < MAX_INIT_RETRIES) { @@ -1721,7 +3319,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, if (ha->isp_ops->reset_chip(ha) == QLA_ERROR) continue; - status = qla4xxx_initialize_adapter(ha, REBUILD_DDB_LIST); + status = qla4xxx_initialize_adapter(ha); } if (!test_bit(AF_ONLINE, &ha->flags)) { @@ -1736,24 +3334,9 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, qla4_8xxx_idc_unlock(ha); } ret = -ENODEV; - goto probe_failed; + goto remove_host; } - host->cmd_per_lun = 3; - host->max_channel = 0; - host->max_lun = MAX_LUNS - 1; - host->max_id = MAX_TARGETS; - host->max_cmd_len = IOCB_MAX_CDB_LEN; - host->can_queue = MAX_SRBS ; - host->transportt = qla4xxx_scsi_transport; - - ret = scsi_init_shared_tag_map(host, MAX_SRBS); - if (ret) { - ql4_printk(KERN_WARNING, ha, - "scsi_init_shared_tag_map failed\n"); - goto probe_failed; - } - /* Startup the kernel thread for this host adapter. 
*/ DEBUG2(printk("scsi: %s: Starting kernel thread for " "qla4xxx_dpc\n", __func__)); @@ -1762,10 +3345,18 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, if (!ha->dpc_thread) { ql4_printk(KERN_WARNING, ha, "Unable to start DPC thread!\n"); ret = -ENODEV; - goto probe_failed; + goto remove_host; } INIT_WORK(&ha->dpc_work, qla4xxx_do_dpc); + sprintf(buf, "qla4xxx_%lu_task", ha->host_no); + ha->task_wq = alloc_workqueue(buf, WQ_MEM_RECLAIM, 1); + if (!ha->task_wq) { + ql4_printk(KERN_WARNING, ha, "Unable to start task thread!\n"); + ret = -ENODEV; + goto remove_host; + } + /* For ISP-82XX, request_irqs is called in qla4_8xxx_load_risc * (which is called indirectly by qla4xxx_initialize_adapter), * so that irqs will be registered after crbinit but before @@ -1776,7 +3367,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, if (ret) { ql4_printk(KERN_WARNING, ha, "Failed to reserve " "interrupt %d already in use.\n", pdev->irq); - goto probe_failed; + goto remove_host; } } @@ -1788,21 +3379,25 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev, set_bit(AF_INIT_DONE, &ha->flags); - pci_set_drvdata(pdev, ha); - - ret = scsi_add_host(host, &pdev->dev); - if (ret) - goto probe_failed; - printk(KERN_INFO " QLogic iSCSI HBA Driver version: %s\n" " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", qla4xxx_version_str, ha->pdev->device, pci_name(ha->pdev), ha->host_no, ha->firmware_version[0], ha->firmware_version[1], ha->patch_number, ha->build_number); - scsi_scan_host(host); + + qla4xxx_create_chap_list(ha); + + if (qla4xxx_setup_boot_info(ha)) + ql4_printk(KERN_ERR, ha, "%s:ISCSI boot info setup failed\n", + __func__); + + qla4xxx_create_ifaces(ha); return 0; +remove_host: + scsi_remove_host(ha->host); + probe_failed: qla4xxx_free_adapter(ha); @@ -1867,8 +3462,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev) if (!is_qla8022(ha)) qla4xxx_prevent_other_port_reinit(ha); - /* remove devs from iscsi_sessions to scsi_devices */ - qla4xxx_free_ddb_list(ha); + /* destroy iface from sysfs */ + qla4xxx_destroy_ifaces(ha); + + if (ha->boot_kset) + iscsi_boot_destroy_kset(ha->boot_kset); scsi_remove_host(ha->host); @@ -1907,10 +3505,15 @@ static void qla4xxx_config_dma_addressing(struct scsi_qla_host *ha) static int qla4xxx_slave_alloc(struct scsi_device *sdev) { - struct iscsi_cls_session *sess = starget_to_session(sdev->sdev_target); - struct ddb_entry *ddb = sess->dd_data; + struct iscsi_cls_session *cls_sess; + struct iscsi_session *sess; + struct ddb_entry *ddb; int queue_depth = QL4_DEF_QDEPTH; + cls_sess = starget_to_session(sdev->sdev_target); + sess = cls_sess->dd_data; + ddb = sess->dd_data; + sdev->hostdata = ddb; sdev->tagged_supported = 1; @@ -2248,7 +3851,7 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) int return_status = FAILED; struct scsi_qla_host *ha; - ha = (struct scsi_qla_host *) cmd->device->host->hostdata; + ha = to_qla_host(cmd->device->host); if (ql4xdontresethba) { DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n", @@ -2284,6 +3887,110 @@ static int qla4xxx_eh_host_reset(struct scsi_cmnd *cmd) return return_status; } +static int qla4xxx_context_reset(struct scsi_qla_host *ha) +{ + uint32_t mbox_cmd[MBOX_REG_COUNT]; + uint32_t mbox_sts[MBOX_REG_COUNT]; + struct addr_ctrl_blk_def *acb = NULL; + uint32_t acb_len = sizeof(struct addr_ctrl_blk_def); + int rval = QLA_SUCCESS; + dma_addr_t acb_dma; + + acb = dma_alloc_coherent(&ha->pdev->dev, + sizeof(struct addr_ctrl_blk_def), + &acb_dma, 
GFP_KERNEL); + if (!acb) { + ql4_printk(KERN_ERR, ha, "%s: Unable to alloc acb\n", + __func__); + rval = -ENOMEM; + goto exit_port_reset; + } + + memset(acb, 0, acb_len); + + rval = qla4xxx_get_acb(ha, acb_dma, PRIMARI_ACB, acb_len); + if (rval != QLA_SUCCESS) { + rval = -EIO; + goto exit_free_acb; + } + + rval = qla4xxx_disable_acb(ha); + if (rval != QLA_SUCCESS) { + rval = -EIO; + goto exit_free_acb; + } + + wait_for_completion_timeout(&ha->disable_acb_comp, + DISABLE_ACB_TOV * HZ); + + rval = qla4xxx_set_acb(ha, &mbox_cmd[0], &mbox_sts[0], acb_dma); + if (rval != QLA_SUCCESS) { + rval = -EIO; + goto exit_free_acb; + } + +exit_free_acb: + dma_free_coherent(&ha->pdev->dev, sizeof(struct addr_ctrl_blk_def), + acb, acb_dma); +exit_port_reset: + DEBUG2(ql4_printk(KERN_INFO, ha, "%s %s\n", __func__, + rval == QLA_SUCCESS ? "SUCCEEDED" : "FAILED")); + return rval; +} + +static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type) +{ + struct scsi_qla_host *ha = to_qla_host(shost); + int rval = QLA_SUCCESS; + + if (ql4xdontresethba) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Don't Reset HBA\n", + __func__)); + rval = -EPERM; + goto exit_host_reset; + } + + rval = qla4xxx_wait_for_hba_online(ha); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host " + "adapter\n", __func__)); + rval = -EIO; + goto exit_host_reset; + } + + if (test_bit(DPC_RESET_HA, &ha->dpc_flags)) + goto recover_adapter; + + switch (reset_type) { + case SCSI_ADAPTER_RESET: + set_bit(DPC_RESET_HA, &ha->dpc_flags); + break; + case SCSI_FIRMWARE_RESET: + if (!test_bit(DPC_RESET_HA, &ha->dpc_flags)) { + if (is_qla8022(ha)) + /* set firmware context reset */ + set_bit(DPC_RESET_HA_FW_CONTEXT, + &ha->dpc_flags); + else { + rval = qla4xxx_context_reset(ha); + goto exit_host_reset; + } + } + break; + } + +recover_adapter: + rval = qla4xxx_recover_adapter(ha); + if (rval != QLA_SUCCESS) { + DEBUG2(ql4_printk(KERN_INFO, ha, "%s: recover adapter fail\n", + __func__)); + rval = -EIO; + } + +exit_host_reset: + return rval; +} + /* PCI AER driver recovers from all correctable errors w/o * driver intervention. For uncorrectable errors PCI AER * driver calls the following device driver's callbacks @@ -2360,7 +4067,8 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) if (test_bit(AF_ONLINE, &ha->flags)) { clear_bit(AF_ONLINE, &ha->flags); - qla4xxx_mark_all_devices_missing(ha); + clear_bit(AF_LINK_UP, &ha->flags); + iscsi_host_for_each_session(ha->host, qla4xxx_fail_session); qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); } @@ -2407,7 +4115,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) qla4_8xxx_idc_unlock(ha); clear_bit(AF_FW_RECOVERY, &ha->flags); - rval = qla4xxx_initialize_adapter(ha, PRESERVE_DDB_LIST); + rval = qla4xxx_initialize_adapter(ha); qla4_8xxx_idc_lock(ha); if (rval != QLA_SUCCESS) { @@ -2443,8 +4151,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha) if ((qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE) == QLA82XX_DEV_READY)) { clear_bit(AF_FW_RECOVERY, &ha->flags); - rval = qla4xxx_initialize_adapter(ha, - PRESERVE_DDB_LIST); + rval = qla4xxx_initialize_adapter(ha); if (rval == QLA_SUCCESS) { ret = qla4xxx_request_irqs(ha); if (ret) { diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h index 610492877253..c15347d3f532 100644 --- a/drivers/scsi/qla4xxx/ql4_version.h +++ b/drivers/scsi/qla4xxx/ql4_version.h @@ -5,4 +5,4 @@ * See LICENSE.qla4xxx for copyright and licensing details. 
*/ -#define QLA4XXX_DRIVER_VERSION "5.02.00-k7" +#define QLA4XXX_DRIVER_VERSION "5.02.00-k8" diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 9689d41c7888..e40dc1cb09a0 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c @@ -880,7 +880,7 @@ static inline void cmd_frob(struct Command_Entry *cmd, struct scsi_cmnd *Cmnd, cmd->control_flags |= CFLAG_WRITE; else cmd->control_flags |= CFLAG_READ; - cmd->time_out = 30; + cmd->time_out = Cmnd->request->timeout/HZ; memcpy(cmd->cdb, Cmnd->cmnd, Cmnd->cmd_len); } diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index a4b9cdbaaa0b..dc6131e6a1ba 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c @@ -293,8 +293,16 @@ static int scsi_check_sense(struct scsi_cmnd *scmd) * so that we can deal with it there. */ if (scmd->device->expecting_cc_ua) { - scmd->device->expecting_cc_ua = 0; - return NEEDS_RETRY; + /* + * Because some device does not queue unit + * attentions correctly, we carefully check + * additional sense code and qualifier so as + * not to squash media change unit attention. + */ + if (sshdr.asc != 0x28 || sshdr.ascq != 0x00) { + scmd->device->expecting_cc_ua = 0; + return NEEDS_RETRY; + } } /* * if the device is in the process of becoming ready, we diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index e0bd3f790fca..04c2a278076e 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -246,6 +246,43 @@ show_shost_active_mode(struct device *dev, static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL); +static int check_reset_type(char *str) +{ + if (strncmp(str, "adapter", 10) == 0) + return SCSI_ADAPTER_RESET; + else if (strncmp(str, "firmware", 10) == 0) + return SCSI_FIRMWARE_RESET; + else + return 0; +} + +static ssize_t +store_host_reset(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct Scsi_Host *shost = class_to_shost(dev); + struct scsi_host_template *sht = shost->hostt; + int ret = -EINVAL; + char str[10]; + int type; + + sscanf(buf, "%s", str); + type = check_reset_type(str); + + if (!type) + goto exit_store_host_reset; + + if (sht->host_reset) + ret = sht->host_reset(shost, type); + +exit_store_host_reset: + if (ret == 0) + ret = count; + return ret; +} + +static DEVICE_ATTR(host_reset, S_IWUSR, NULL, store_host_reset); + shost_rd_attr(unique_id, "%u\n"); shost_rd_attr(host_busy, "%hu\n"); shost_rd_attr(cmd_per_lun, "%hd\n"); @@ -272,6 +309,7 @@ static struct attribute *scsi_sysfs_shost_attrs[] = { &dev_attr_active_mode.attr, &dev_attr_prot_capabilities.attr, &dev_attr_prot_guard_type.attr, + &dev_attr_host_reset.attr, NULL }; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 3fd16d7212de..1bcd65a509e6 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -23,6 +23,8 @@ #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/bsg-lib.h> +#include <linux/idr.h> #include <net/tcp.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -31,10 +33,7 @@ #include <scsi/scsi_transport_iscsi.h> #include <scsi/iscsi_if.h> #include <scsi/scsi_cmnd.h> - -#define ISCSI_SESSION_ATTRS 23 -#define ISCSI_CONN_ATTRS 13 -#define ISCSI_HOST_ATTRS 4 +#include <scsi/scsi_bsg_iscsi.h> #define ISCSI_TRANSPORT_VERSION "2.0-870" @@ -76,16 +75,14 @@ struct iscsi_internal { struct list_head list; struct device dev; - struct device_attribute 
*host_attrs[ISCSI_HOST_ATTRS + 1]; struct transport_container conn_cont; - struct device_attribute *conn_attrs[ISCSI_CONN_ATTRS + 1]; struct transport_container session_cont; - struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1]; }; static atomic_t iscsi_session_nr; /* sysfs session id for next new session */ static struct workqueue_struct *iscsi_eh_timer_workq; +static DEFINE_IDA(iscsi_sess_ida); /* * list of registered transports and lock that must * be held while accessing list. The iscsi_transport_lock must @@ -270,6 +267,291 @@ struct iscsi_endpoint *iscsi_lookup_endpoint(u64 handle) } EXPORT_SYMBOL_GPL(iscsi_lookup_endpoint); +/* + * Interface to display network param to sysfs + */ + +static void iscsi_iface_release(struct device *dev) +{ + struct iscsi_iface *iface = iscsi_dev_to_iface(dev); + struct device *parent = iface->dev.parent; + + kfree(iface); + put_device(parent); +} + + +static struct class iscsi_iface_class = { + .name = "iscsi_iface", + .dev_release = iscsi_iface_release, +}; + +#define ISCSI_IFACE_ATTR(_prefix, _name, _mode, _show, _store) \ +struct device_attribute dev_attr_##_prefix##_##_name = \ + __ATTR(_name, _mode, _show, _store) + +/* iface attrs show */ +#define iscsi_iface_attr_show(type, name, param_type, param) \ +static ssize_t \ +show_##type##_##name(struct device *dev, struct device_attribute *attr, \ + char *buf) \ +{ \ + struct iscsi_iface *iface = iscsi_dev_to_iface(dev); \ + struct iscsi_transport *t = iface->transport; \ + return t->get_iface_param(iface, param_type, param, buf); \ +} \ + +#define iscsi_iface_net_attr(type, name, param) \ + iscsi_iface_attr_show(type, name, ISCSI_NET_PARAM, param) \ +static ISCSI_IFACE_ATTR(type, name, S_IRUGO, show_##type##_##name, NULL); + +/* generic read only ipv4 attribute */ +iscsi_iface_net_attr(ipv4_iface, ipaddress, ISCSI_NET_PARAM_IPV4_ADDR); +iscsi_iface_net_attr(ipv4_iface, gateway, ISCSI_NET_PARAM_IPV4_GW); +iscsi_iface_net_attr(ipv4_iface, subnet, ISCSI_NET_PARAM_IPV4_SUBNET); +iscsi_iface_net_attr(ipv4_iface, bootproto, ISCSI_NET_PARAM_IPV4_BOOTPROTO); + +/* generic read only ipv6 attribute */ +iscsi_iface_net_attr(ipv6_iface, ipaddress, ISCSI_NET_PARAM_IPV6_ADDR); +iscsi_iface_net_attr(ipv6_iface, link_local_addr, ISCSI_NET_PARAM_IPV6_LINKLOCAL); +iscsi_iface_net_attr(ipv6_iface, router_addr, ISCSI_NET_PARAM_IPV6_ROUTER); +iscsi_iface_net_attr(ipv6_iface, ipaddr_autocfg, + ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG); +iscsi_iface_net_attr(ipv6_iface, link_local_autocfg, + ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG); + +/* common read only iface attribute */ +iscsi_iface_net_attr(iface, enabled, ISCSI_NET_PARAM_IFACE_ENABLE); +iscsi_iface_net_attr(iface, vlan_id, ISCSI_NET_PARAM_VLAN_ID); +iscsi_iface_net_attr(iface, vlan_priority, ISCSI_NET_PARAM_VLAN_PRIORITY); +iscsi_iface_net_attr(iface, vlan_enabled, ISCSI_NET_PARAM_VLAN_ENABLED); +iscsi_iface_net_attr(iface, mtu, ISCSI_NET_PARAM_MTU); +iscsi_iface_net_attr(iface, port, ISCSI_NET_PARAM_PORT); + +static mode_t iscsi_iface_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct iscsi_iface *iface = iscsi_dev_to_iface(dev); + struct iscsi_transport *t = iface->transport; + int param; + + if (attr == &dev_attr_iface_enabled.attr) + param = ISCSI_NET_PARAM_IFACE_ENABLE; + else if (attr == &dev_attr_iface_vlan_id.attr) + param = ISCSI_NET_PARAM_VLAN_ID; + else if (attr == &dev_attr_iface_vlan_priority.attr) + param = ISCSI_NET_PARAM_VLAN_PRIORITY; + else if (attr
== &dev_attr_iface_vlan_enabled.attr) + param = ISCSI_NET_PARAM_VLAN_ENABLED; + else if (attr == &dev_attr_iface_mtu.attr) + param = ISCSI_NET_PARAM_MTU; + else if (attr == &dev_attr_iface_port.attr) + param = ISCSI_NET_PARAM_PORT; + else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV4) { + if (attr == &dev_attr_ipv4_iface_ipaddress.attr) + param = ISCSI_NET_PARAM_IPV4_ADDR; + else if (attr == &dev_attr_ipv4_iface_gateway.attr) + param = ISCSI_NET_PARAM_IPV4_GW; + else if (attr == &dev_attr_ipv4_iface_subnet.attr) + param = ISCSI_NET_PARAM_IPV4_SUBNET; + else if (attr == &dev_attr_ipv4_iface_bootproto.attr) + param = ISCSI_NET_PARAM_IPV4_BOOTPROTO; + else + return 0; + } else if (iface->iface_type == ISCSI_IFACE_TYPE_IPV6) { + if (attr == &dev_attr_ipv6_iface_ipaddress.attr) + param = ISCSI_NET_PARAM_IPV6_ADDR; + else if (attr == &dev_attr_ipv6_iface_link_local_addr.attr) + param = ISCSI_NET_PARAM_IPV6_LINKLOCAL; + else if (attr == &dev_attr_ipv6_iface_router_addr.attr) + param = ISCSI_NET_PARAM_IPV6_ROUTER; + else if (attr == &dev_attr_ipv6_iface_ipaddr_autocfg.attr) + param = ISCSI_NET_PARAM_IPV6_ADDR_AUTOCFG; + else if (attr == &dev_attr_ipv6_iface_link_local_autocfg.attr) + param = ISCSI_NET_PARAM_IPV6_LINKLOCAL_AUTOCFG; + else + return 0; + } else { + WARN_ONCE(1, "Invalid iface attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_NET_PARAM, param); +} + +static struct attribute *iscsi_iface_attrs[] = { + &dev_attr_iface_enabled.attr, + &dev_attr_iface_vlan_id.attr, + &dev_attr_iface_vlan_priority.attr, + &dev_attr_iface_vlan_enabled.attr, + &dev_attr_ipv4_iface_ipaddress.attr, + &dev_attr_ipv4_iface_gateway.attr, + &dev_attr_ipv4_iface_subnet.attr, + &dev_attr_ipv4_iface_bootproto.attr, + &dev_attr_ipv6_iface_ipaddress.attr, + &dev_attr_ipv6_iface_link_local_addr.attr, + &dev_attr_ipv6_iface_router_addr.attr, + &dev_attr_ipv6_iface_ipaddr_autocfg.attr, + &dev_attr_ipv6_iface_link_local_autocfg.attr, + &dev_attr_iface_mtu.attr, + &dev_attr_iface_port.attr, + NULL, +}; + +static struct attribute_group iscsi_iface_group = { + .attrs = iscsi_iface_attrs, + .is_visible = iscsi_iface_attr_is_visible, +}; + +struct iscsi_iface * +iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport, + uint32_t iface_type, uint32_t iface_num, int dd_size) +{ + struct iscsi_iface *iface; + int err; + + iface = kzalloc(sizeof(*iface) + dd_size, GFP_KERNEL); + if (!iface) + return NULL; + + iface->transport = transport; + iface->iface_type = iface_type; + iface->iface_num = iface_num; + iface->dev.release = iscsi_iface_release; + iface->dev.class = &iscsi_iface_class; + /* parent reference released in iscsi_iface_release */ + iface->dev.parent = get_device(&shost->shost_gendev); + if (iface_type == ISCSI_IFACE_TYPE_IPV4) + dev_set_name(&iface->dev, "ipv4-iface-%u-%u", shost->host_no, + iface_num); + else + dev_set_name(&iface->dev, "ipv6-iface-%u-%u", shost->host_no, + iface_num); + + err = device_register(&iface->dev); + if (err) + goto free_iface; + + err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group); + if (err) + goto unreg_iface; + + if (dd_size) + iface->dd_data = &iface[1]; + return iface; + +unreg_iface: + device_unregister(&iface->dev); + return NULL; + +free_iface: + put_device(iface->dev.parent); + kfree(iface); + return NULL; +} +EXPORT_SYMBOL_GPL(iscsi_create_iface); + +void iscsi_destroy_iface(struct iscsi_iface *iface) +{ + sysfs_remove_group(&iface->dev.kobj, &iscsi_iface_group); + device_unregister(&iface->dev); +} 
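/*
 * The iface attribute code above is the first user of the pattern this
 * patch set converts the whole transport class to: one static
 * attribute array plus an is_visible() callback, instead of a
 * per-transport array of struct device_attribute pointers assembled at
 * registration time.  Below is a hedged, self-contained sketch of the
 * same pattern as a toy module; every demo_* and feature* name is
 * illustrative and not part of this patch.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static bool demo_feature_enabled;	/* gates visibility of "feature" */

static ssize_t feature_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", demo_feature_enabled);
}

static struct kobj_attribute feature_attr = __ATTR_RO(feature);

/*
 * Called once per attribute while the group is created: return 0 to
 * hide the attribute on this kobject, or a mode to expose it with.
 */
static mode_t demo_attr_is_visible(struct kobject *kobj,
				   struct attribute *attr, int i)
{
	if (attr == &feature_attr.attr && !demo_feature_enabled)
		return 0;
	return attr->mode;
}

static struct attribute *demo_attrs[] = {
	&feature_attr.attr,
	NULL,
};

static struct attribute_group demo_group = {
	.attrs = demo_attrs,
	.is_visible = demo_attr_is_visible,
};

static int __init demo_init(void)
{
	/* one call registers the whole group; is_visible() filters it */
	return sysfs_create_group(kernel_kobj, &demo_group);
}

static void __exit demo_exit(void)
{
	sysfs_remove_group(kernel_kobj, &demo_group);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");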
+EXPORT_SYMBOL_GPL(iscsi_destroy_iface); + +/* + * BSG support + */ +/** + * iscsi_bsg_host_dispatch - Dispatch command to LLD. + * @job: bsg job to be processed + */ +static int iscsi_bsg_host_dispatch(struct bsg_job *job) +{ + struct Scsi_Host *shost = iscsi_job_to_shost(job); + struct iscsi_bsg_request *req = job->request; + struct iscsi_bsg_reply *reply = job->reply; + struct iscsi_internal *i = to_iscsi_internal(shost->transportt); + int cmdlen = sizeof(uint32_t); /* start with length of msgcode */ + int ret; + + /* check if we have the msgcode value at least */ + if (job->request_len < sizeof(uint32_t)) { + ret = -ENOMSG; + goto fail_host_msg; + } + + /* Validate the host command */ + switch (req->msgcode) { + case ISCSI_BSG_HST_VENDOR: + cmdlen += sizeof(struct iscsi_bsg_host_vendor); + if ((shost->hostt->vendor_id == 0L) || + (req->rqst_data.h_vendor.vendor_id != + shost->hostt->vendor_id)) { + ret = -ESRCH; + goto fail_host_msg; + } + break; + default: + ret = -EBADR; + goto fail_host_msg; + } + + /* check if we really have all the request data needed */ + if (job->request_len < cmdlen) { + ret = -ENOMSG; + goto fail_host_msg; + } + + ret = i->iscsi_transport->bsg_request(job); + if (!ret) + return 0; + +fail_host_msg: + /* return the errno failure code as the only status */ + BUG_ON(job->reply_len < sizeof(uint32_t)); + reply->reply_payload_rcv_len = 0; + reply->result = ret; + job->reply_len = sizeof(uint32_t); + bsg_job_done(job, ret, 0); + return 0; +} + +/** + * iscsi_bsg_host_add - Create and add the bsg hooks to receive requests + * @shost: shost for iscsi_host + * @cls_host: iscsi_cls_host adding the structures to + */ +static int +iscsi_bsg_host_add(struct Scsi_Host *shost, struct iscsi_cls_host *ihost) +{ + struct device *dev = &shost->shost_gendev; + struct iscsi_internal *i = to_iscsi_internal(shost->transportt); + struct request_queue *q; + char bsg_name[20]; + int ret; + + if (!i->iscsi_transport->bsg_request) + return -ENOTSUPP; + + snprintf(bsg_name, sizeof(bsg_name), "iscsi_host%d", shost->host_no); + + q = __scsi_alloc_queue(shost, bsg_request_fn); + if (!q) + return -ENOMEM; + + ret = bsg_setup_queue(dev, q, bsg_name, iscsi_bsg_host_dispatch, 0); + if (ret) { + shost_printk(KERN_ERR, shost, "bsg interface failed to " + "initialize - no request queue\n"); + blk_cleanup_queue(q); + return ret; + } + + ihost->bsg_q = q; + return 0; +} + static int iscsi_setup_host(struct transport_container *tc, struct device *dev, struct device *cdev) { @@ -279,13 +561,30 @@ static int iscsi_setup_host(struct transport_container *tc, struct device *dev, memset(ihost, 0, sizeof(*ihost)); atomic_set(&ihost->nr_scans, 0); mutex_init(&ihost->mutex); + + iscsi_bsg_host_add(shost, ihost); + /* ignore any bsg add error - we just can't do sgio */ + + return 0; +} + +static int iscsi_remove_host(struct transport_container *tc, + struct device *dev, struct device *cdev) +{ + struct Scsi_Host *shost = dev_to_shost(dev); + struct iscsi_cls_host *ihost = shost->shost_data; + + if (ihost->bsg_q) { + bsg_remove_queue(ihost->bsg_q); + blk_cleanup_queue(ihost->bsg_q); + } return 0; } static DECLARE_TRANSPORT_CLASS(iscsi_host_class, "iscsi_host", iscsi_setup_host, - NULL, + iscsi_remove_host, NULL); static DECLARE_TRANSPORT_CLASS(iscsi_session_class, @@ -404,6 +703,19 @@ int iscsi_session_chkready(struct iscsi_cls_session *session) } EXPORT_SYMBOL_GPL(iscsi_session_chkready); +int iscsi_is_session_online(struct iscsi_cls_session *session) +{ + unsigned long flags; + int ret = 0; + + 
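/*
 * Stepping back to the bsg plumbing added above: iscsi_bsg_host_add()
 * allocates a request queue with __scsi_alloc_queue() and registers it
 * through bsg_setup_queue(), and iscsi_bsg_host_dispatch() checks the
 * msgcode and vendor id before handing the job to the transport's new
 * bsg_request() hook.  The LLD-side counterpart, as a hedged sketch in
 * comment form (the my_* names are placeholders, not from this patch):
 *
 *     static int my_bsg_request(struct bsg_job *job)
 *     {
 *             struct iscsi_bsg_request *req = job->request;
 *
 *             if (req->msgcode != ISCSI_BSG_HST_VENDOR)
 *                     return -EINVAL;
 *             return my_handle_vendor_cmd(job);
 *     }
 *
 * wired up with .bsg_request = my_bsg_request in the iscsi_transport
 * template and a nonzero vendor_id in the scsi_host_template, which is
 * what the ISCSI_BSG_HST_VENDOR branch above matches against.
 */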
spin_lock_irqsave(&session->lock, flags); + if (session->state == ISCSI_SESSION_LOGGED_IN) + ret = 1; + spin_unlock_irqrestore(&session->lock, flags); + return ret; +} +EXPORT_SYMBOL_GPL(iscsi_is_session_online); + static void iscsi_session_release(struct device *dev) { struct iscsi_cls_session *session = iscsi_dev_to_session(dev); @@ -680,6 +992,7 @@ static void __iscsi_unbind_session(struct work_struct *work) struct Scsi_Host *shost = iscsi_session_to_shost(session); struct iscsi_cls_host *ihost = shost->shost_data; unsigned long flags; + unsigned int target_id; ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n"); @@ -691,10 +1004,15 @@ static void __iscsi_unbind_session(struct work_struct *work) mutex_unlock(&ihost->mutex); return; } + + target_id = session->target_id; session->target_id = ISCSI_MAX_TARGET; spin_unlock_irqrestore(&session->lock, flags); mutex_unlock(&ihost->mutex); + if (session->ida_used) + ida_simple_remove(&iscsi_sess_ida, target_id); + scsi_remove_target(&session->dev); iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION); ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n"); @@ -735,59 +1053,36 @@ iscsi_alloc_session(struct Scsi_Host *shost, struct iscsi_transport *transport, } EXPORT_SYMBOL_GPL(iscsi_alloc_session); -static int iscsi_get_next_target_id(struct device *dev, void *data) -{ - struct iscsi_cls_session *session; - unsigned long flags; - int err = 0; - - if (!iscsi_is_session_dev(dev)) - return 0; - - session = iscsi_dev_to_session(dev); - spin_lock_irqsave(&session->lock, flags); - if (*((unsigned int *) data) == session->target_id) - err = -EEXIST; - spin_unlock_irqrestore(&session->lock, flags); - return err; -} - int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) { struct Scsi_Host *shost = iscsi_session_to_shost(session); struct iscsi_cls_host *ihost; unsigned long flags; - unsigned int id = target_id; + int id = 0; int err; ihost = shost->shost_data; session->sid = atomic_add_return(1, &iscsi_session_nr); - if (id == ISCSI_MAX_TARGET) { - for (id = 0; id < ISCSI_MAX_TARGET; id++) { - err = device_for_each_child(&shost->shost_gendev, &id, - iscsi_get_next_target_id); - if (!err) - break; - } + if (target_id == ISCSI_MAX_TARGET) { + id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL); - if (id == ISCSI_MAX_TARGET) { + if (id < 0) { iscsi_cls_session_printk(KERN_ERR, session, - "Too many iscsi targets. 
Max " - "number of targets is %d.\n", - ISCSI_MAX_TARGET - 1); - err = -EOVERFLOW; - goto release_host; + "Failure in Target ID Allocation\n"); + return id; } - } - session->target_id = id; + session->target_id = (unsigned int)id; + session->ida_used = true; + } else + session->target_id = target_id; dev_set_name(&session->dev, "session%u", session->sid); err = device_add(&session->dev); if (err) { iscsi_cls_session_printk(KERN_ERR, session, "could not register session's dev\n"); - goto release_host; + goto release_ida; } transport_register_device(&session->dev); @@ -799,8 +1094,10 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id) ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n"); return 0; -release_host: - scsi_host_put(shost); +release_ida: + if (session->ida_used) + ida_simple_remove(&iscsi_sess_ida, session->target_id); + return err; } EXPORT_SYMBOL_GPL(iscsi_add_session); @@ -1144,6 +1441,40 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error) } EXPORT_SYMBOL_GPL(iscsi_conn_error_event); +void iscsi_conn_login_event(struct iscsi_cls_conn *conn, + enum iscsi_conn_state state) +{ + struct nlmsghdr *nlh; + struct sk_buff *skb; + struct iscsi_uevent *ev; + struct iscsi_internal *priv; + int len = NLMSG_SPACE(sizeof(*ev)); + + priv = iscsi_if_transport_lookup(conn->transport); + if (!priv) + return; + + skb = alloc_skb(len, GFP_ATOMIC); + if (!skb) { + iscsi_cls_conn_printk(KERN_ERR, conn, "gracefully ignored " + "conn login (%d)\n", state); + return; + } + + nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0); + ev = NLMSG_DATA(nlh); + ev->transport_handle = iscsi_handle(conn->transport); + ev->type = ISCSI_KEVENT_CONN_LOGIN_STATE; + ev->r.conn_login.state = state; + ev->r.conn_login.cid = conn->cid; + ev->r.conn_login.sid = iscsi_conn_get_sid(conn); + iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC); + + iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn login (%d)\n", + state); +} +EXPORT_SYMBOL_GPL(iscsi_conn_login_event); + static int iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi, void *payload, int size) @@ -1558,6 +1889,29 @@ iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev) } static int +iscsi_set_iface_params(struct iscsi_transport *transport, + struct iscsi_uevent *ev, uint32_t len) +{ + char *data = (char *)ev + sizeof(*ev); + struct Scsi_Host *shost; + int err; + + if (!transport->set_iface_param) + return -ENOSYS; + + shost = scsi_host_lookup(ev->u.set_iface_params.host_no); + if (!shost) { + printk(KERN_ERR "set_iface_params could not find host no %u\n", + ev->u.set_iface_params.host_no); + return -ENODEV; + } + + err = transport->set_iface_param(shost, data, len); + scsi_host_put(shost); + return err; +} + +static int iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) { int err = 0; @@ -1696,6 +2050,10 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group) case ISCSI_UEVENT_PATH_UPDATE: err = iscsi_set_path(transport, ev); break; + case ISCSI_UEVENT_SET_IFACE_PARAMS: + err = iscsi_set_iface_params(transport, ev, + nlmsg_attrlen(nlh, sizeof(*ev))); + break; default: err = -ENOSYS; break; @@ -1824,6 +2182,70 @@ static ISCSI_CLASS_ATTR(conn, field, S_IRUGO, \ iscsi_conn_ep_attr(address, ISCSI_PARAM_CONN_ADDRESS); iscsi_conn_ep_attr(port, ISCSI_PARAM_CONN_PORT); +static struct attribute *iscsi_conn_attrs[] = { + &dev_attr_conn_max_recv_dlength.attr, + 
&dev_attr_conn_max_xmit_dlength.attr, + &dev_attr_conn_header_digest.attr, + &dev_attr_conn_data_digest.attr, + &dev_attr_conn_ifmarker.attr, + &dev_attr_conn_ofmarker.attr, + &dev_attr_conn_address.attr, + &dev_attr_conn_port.attr, + &dev_attr_conn_exp_statsn.attr, + &dev_attr_conn_persistent_address.attr, + &dev_attr_conn_persistent_port.attr, + &dev_attr_conn_ping_tmo.attr, + &dev_attr_conn_recv_tmo.attr, + NULL, +}; + +static mode_t iscsi_conn_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *cdev = container_of(kobj, struct device, kobj); + struct iscsi_cls_conn *conn = transport_class_to_conn(cdev); + struct iscsi_transport *t = conn->transport; + int param; + + if (attr == &dev_attr_conn_max_recv_dlength.attr) + param = ISCSI_PARAM_MAX_RECV_DLENGTH; + else if (attr == &dev_attr_conn_max_xmit_dlength.attr) + param = ISCSI_PARAM_MAX_XMIT_DLENGTH; + else if (attr == &dev_attr_conn_header_digest.attr) + param = ISCSI_PARAM_HDRDGST_EN; + else if (attr == &dev_attr_conn_data_digest.attr) + param = ISCSI_PARAM_DATADGST_EN; + else if (attr == &dev_attr_conn_ifmarker.attr) + param = ISCSI_PARAM_IFMARKER_EN; + else if (attr == &dev_attr_conn_ofmarker.attr) + param = ISCSI_PARAM_OFMARKER_EN; + else if (attr == &dev_attr_conn_address.attr) + param = ISCSI_PARAM_CONN_ADDRESS; + else if (attr == &dev_attr_conn_port.attr) + param = ISCSI_PARAM_CONN_PORT; + else if (attr == &dev_attr_conn_exp_statsn.attr) + param = ISCSI_PARAM_EXP_STATSN; + else if (attr == &dev_attr_conn_persistent_address.attr) + param = ISCSI_PARAM_PERSISTENT_ADDRESS; + else if (attr == &dev_attr_conn_persistent_port.attr) + param = ISCSI_PARAM_PERSISTENT_PORT; + else if (attr == &dev_attr_conn_ping_tmo.attr) + param = ISCSI_PARAM_PING_TMO; + else if (attr == &dev_attr_conn_recv_tmo.attr) + param = ISCSI_PARAM_RECV_TMO; + else { + WARN_ONCE(1, "Invalid conn attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_PARAM, param); +} + +static struct attribute_group iscsi_conn_group = { + .attrs = iscsi_conn_attrs, + .is_visible = iscsi_conn_attr_is_visible, +}; + /* * iSCSI session attrs */ @@ -1845,7 +2267,6 @@ show_session_param_##param(struct device *dev, \ iscsi_session_attr_show(param, perm) \ static ISCSI_CLASS_ATTR(sess, field, S_IRUGO, show_session_param_##param, \ NULL); - iscsi_session_attr(targetname, ISCSI_PARAM_TARGET_NAME, 0); iscsi_session_attr(initial_r2t, ISCSI_PARAM_INITIAL_R2T_EN, 0); iscsi_session_attr(max_outstanding_r2t, ISCSI_PARAM_MAX_R2T, 0); @@ -1922,6 +2343,100 @@ static ISCSI_CLASS_ATTR(priv_sess, field, S_IRUGO | S_IWUSR, \ store_priv_session_##field) iscsi_priv_session_rw_attr(recovery_tmo, "%d"); +static struct attribute *iscsi_session_attrs[] = { + &dev_attr_sess_initial_r2t.attr, + &dev_attr_sess_max_outstanding_r2t.attr, + &dev_attr_sess_immediate_data.attr, + &dev_attr_sess_first_burst_len.attr, + &dev_attr_sess_max_burst_len.attr, + &dev_attr_sess_data_pdu_in_order.attr, + &dev_attr_sess_data_seq_in_order.attr, + &dev_attr_sess_erl.attr, + &dev_attr_sess_targetname.attr, + &dev_attr_sess_tpgt.attr, + &dev_attr_sess_password.attr, + &dev_attr_sess_password_in.attr, + &dev_attr_sess_username.attr, + &dev_attr_sess_username_in.attr, + &dev_attr_sess_fast_abort.attr, + &dev_attr_sess_abort_tmo.attr, + &dev_attr_sess_lu_reset_tmo.attr, + &dev_attr_sess_tgt_reset_tmo.attr, + &dev_attr_sess_ifacename.attr, + &dev_attr_sess_initiatorname.attr, + &dev_attr_sess_targetalias.attr, + &dev_attr_priv_sess_recovery_tmo.attr, + &dev_attr_priv_sess_state.attr, + NULL, 
+}; + +static mode_t iscsi_session_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *cdev = container_of(kobj, struct device, kobj); + struct iscsi_cls_session *session = transport_class_to_session(cdev); + struct iscsi_transport *t = session->transport; + int param; + + if (attr == &dev_attr_sess_initial_r2t.attr) + param = ISCSI_PARAM_INITIAL_R2T_EN; + else if (attr == &dev_attr_sess_max_outstanding_r2t.attr) + param = ISCSI_PARAM_MAX_R2T; + else if (attr == &dev_attr_sess_immediate_data.attr) + param = ISCSI_PARAM_IMM_DATA_EN; + else if (attr == &dev_attr_sess_first_burst_len.attr) + param = ISCSI_PARAM_FIRST_BURST; + else if (attr == &dev_attr_sess_max_burst_len.attr) + param = ISCSI_PARAM_MAX_BURST; + else if (attr == &dev_attr_sess_data_pdu_in_order.attr) + param = ISCSI_PARAM_PDU_INORDER_EN; + else if (attr == &dev_attr_sess_data_seq_in_order.attr) + param = ISCSI_PARAM_DATASEQ_INORDER_EN; + else if (attr == &dev_attr_sess_erl.attr) + param = ISCSI_PARAM_ERL; + else if (attr == &dev_attr_sess_targetname.attr) + param = ISCSI_PARAM_TARGET_NAME; + else if (attr == &dev_attr_sess_tpgt.attr) + param = ISCSI_PARAM_TPGT; + else if (attr == &dev_attr_sess_password.attr) + param = ISCSI_PARAM_USERNAME; + else if (attr == &dev_attr_sess_password_in.attr) + param = ISCSI_PARAM_USERNAME_IN; + else if (attr == &dev_attr_sess_username.attr) + param = ISCSI_PARAM_PASSWORD; + else if (attr == &dev_attr_sess_username_in.attr) + param = ISCSI_PARAM_PASSWORD_IN; + else if (attr == &dev_attr_sess_fast_abort.attr) + param = ISCSI_PARAM_FAST_ABORT; + else if (attr == &dev_attr_sess_abort_tmo.attr) + param = ISCSI_PARAM_ABORT_TMO; + else if (attr == &dev_attr_sess_lu_reset_tmo.attr) + param = ISCSI_PARAM_LU_RESET_TMO; + else if (attr == &dev_attr_sess_tgt_reset_tmo.attr) + param = ISCSI_PARAM_TGT_RESET_TMO; + else if (attr == &dev_attr_sess_ifacename.attr) + param = ISCSI_PARAM_IFACE_NAME; + else if (attr == &dev_attr_sess_initiatorname.attr) + param = ISCSI_PARAM_INITIATOR_NAME; + else if (attr == &dev_attr_sess_targetalias.attr) + param = ISCSI_PARAM_TARGET_ALIAS; + else if (attr == &dev_attr_priv_sess_recovery_tmo.attr) + return S_IRUGO | S_IWUSR; + else if (attr == &dev_attr_priv_sess_state.attr) + return S_IRUGO; + else { + WARN_ONCE(1, "Invalid session attr"); + return 0; + } + + return t->attr_is_visible(ISCSI_PARAM, param); +} + +static struct attribute_group iscsi_session_group = { + .attrs = iscsi_session_attrs, + .is_visible = iscsi_session_attr_is_visible, +}; + /* * iSCSI host attrs */ @@ -1945,41 +2460,42 @@ iscsi_host_attr(hwaddress, ISCSI_HOST_PARAM_HWADDRESS); iscsi_host_attr(ipaddress, ISCSI_HOST_PARAM_IPADDRESS); iscsi_host_attr(initiatorname, ISCSI_HOST_PARAM_INITIATOR_NAME); -#define SETUP_PRIV_SESSION_RD_ATTR(field) \ -do { \ - priv->session_attrs[count] = &dev_attr_priv_sess_##field; \ - count++; \ -} while (0) - -#define SETUP_PRIV_SESSION_RW_ATTR(field) \ -do { \ - priv->session_attrs[count] = &dev_attr_priv_sess_##field; \ - count++; \ -} while (0) - -#define SETUP_SESSION_RD_ATTR(field, param_flag) \ -do { \ - if (tt->param_mask & param_flag) { \ - priv->session_attrs[count] = &dev_attr_sess_##field; \ - count++; \ - } \ -} while (0) +static struct attribute *iscsi_host_attrs[] = { + &dev_attr_host_netdev.attr, + &dev_attr_host_hwaddress.attr, + &dev_attr_host_ipaddress.attr, + &dev_attr_host_initiatorname.attr, + NULL, +}; -#define SETUP_CONN_RD_ATTR(field, param_flag) \ -do { \ - if (tt->param_mask & param_flag) { \ - 
priv->conn_attrs[count] = &dev_attr_conn_##field; \ - count++; \ - } \ -} while (0) +static mode_t iscsi_host_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int i) +{ + struct device *cdev = container_of(kobj, struct device, kobj); + struct Scsi_Host *shost = transport_class_to_shost(cdev); + struct iscsi_internal *priv = to_iscsi_internal(shost->transportt); + int param; + + if (attr == &dev_attr_host_netdev.attr) + param = ISCSI_HOST_PARAM_NETDEV_NAME; + else if (attr == &dev_attr_host_hwaddress.attr) + param = ISCSI_HOST_PARAM_HWADDRESS; + else if (attr == &dev_attr_host_ipaddress.attr) + param = ISCSI_HOST_PARAM_IPADDRESS; + else if (attr == &dev_attr_host_initiatorname.attr) + param = ISCSI_HOST_PARAM_INITIATOR_NAME; + else { + WARN_ONCE(1, "Invalid host attr"); + return 0; + } -#define SETUP_HOST_RD_ATTR(field, param_flag) \ -do { \ - if (tt->host_param_mask & param_flag) { \ - priv->host_attrs[count] = &dev_attr_host_##field; \ - count++; \ - } \ -} while (0) + return priv->iscsi_transport->attr_is_visible(ISCSI_HOST_PARAM, param); +} + +static struct attribute_group iscsi_host_group = { + .attrs = iscsi_host_attrs, + .is_visible = iscsi_host_attr_is_visible, +}; static int iscsi_session_match(struct attribute_container *cont, struct device *dev) @@ -2051,7 +2567,7 @@ iscsi_register_transport(struct iscsi_transport *tt) { struct iscsi_internal *priv; unsigned long flags; - int count = 0, err; + int err; BUG_ON(!tt); @@ -2078,77 +2594,24 @@ iscsi_register_transport(struct iscsi_transport *tt) goto unregister_dev; /* host parameters */ - priv->t.host_attrs.ac.attrs = &priv->host_attrs[0]; priv->t.host_attrs.ac.class = &iscsi_host_class.class; priv->t.host_attrs.ac.match = iscsi_host_match; + priv->t.host_attrs.ac.grp = &iscsi_host_group; priv->t.host_size = sizeof(struct iscsi_cls_host); transport_container_register(&priv->t.host_attrs); - SETUP_HOST_RD_ATTR(netdev, ISCSI_HOST_NETDEV_NAME); - SETUP_HOST_RD_ATTR(ipaddress, ISCSI_HOST_IPADDRESS); - SETUP_HOST_RD_ATTR(hwaddress, ISCSI_HOST_HWADDRESS); - SETUP_HOST_RD_ATTR(initiatorname, ISCSI_HOST_INITIATOR_NAME); - BUG_ON(count > ISCSI_HOST_ATTRS); - priv->host_attrs[count] = NULL; - count = 0; - /* connection parameters */ - priv->conn_cont.ac.attrs = &priv->conn_attrs[0]; priv->conn_cont.ac.class = &iscsi_connection_class.class; priv->conn_cont.ac.match = iscsi_conn_match; + priv->conn_cont.ac.grp = &iscsi_conn_group; transport_container_register(&priv->conn_cont); - SETUP_CONN_RD_ATTR(max_recv_dlength, ISCSI_MAX_RECV_DLENGTH); - SETUP_CONN_RD_ATTR(max_xmit_dlength, ISCSI_MAX_XMIT_DLENGTH); - SETUP_CONN_RD_ATTR(header_digest, ISCSI_HDRDGST_EN); - SETUP_CONN_RD_ATTR(data_digest, ISCSI_DATADGST_EN); - SETUP_CONN_RD_ATTR(ifmarker, ISCSI_IFMARKER_EN); - SETUP_CONN_RD_ATTR(ofmarker, ISCSI_OFMARKER_EN); - SETUP_CONN_RD_ATTR(address, ISCSI_CONN_ADDRESS); - SETUP_CONN_RD_ATTR(port, ISCSI_CONN_PORT); - SETUP_CONN_RD_ATTR(exp_statsn, ISCSI_EXP_STATSN); - SETUP_CONN_RD_ATTR(persistent_address, ISCSI_PERSISTENT_ADDRESS); - SETUP_CONN_RD_ATTR(persistent_port, ISCSI_PERSISTENT_PORT); - SETUP_CONN_RD_ATTR(ping_tmo, ISCSI_PING_TMO); - SETUP_CONN_RD_ATTR(recv_tmo, ISCSI_RECV_TMO); - - BUG_ON(count > ISCSI_CONN_ATTRS); - priv->conn_attrs[count] = NULL; - count = 0; - /* session parameters */ - priv->session_cont.ac.attrs = &priv->session_attrs[0]; priv->session_cont.ac.class = &iscsi_session_class.class; priv->session_cont.ac.match = iscsi_session_match; + priv->session_cont.ac.grp = &iscsi_session_group; 
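/*
 * One cross-reference while the containers are wired up: the session
 * hunks earlier in this file replaced the device_for_each_child() scan
 * for a free target id with an IDA, so ids are allocated and recycled
 * without walking every child device.  The allocator pattern, as a
 * minimal sketch in comment form (the my_* names are illustrative):
 *
 *     static DEFINE_IDA(my_target_ida);
 *
 *     int id = ida_simple_get(&my_target_ida, 0, 0, GFP_KERNEL);
 *     if (id < 0)
 *             return id;                       (negative errno)
 *     ...
 *     ida_simple_remove(&my_target_ida, id);   (on unbind)
 *
 * The (0, 0) range asks for the lowest free id with no upper bound,
 * which is why iscsi_add_session() can take the int result and store
 * it as the unsigned target_id once it is known to be non-negative.
 */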
transport_container_register(&priv->session_cont); - SETUP_SESSION_RD_ATTR(initial_r2t, ISCSI_INITIAL_R2T_EN); - SETUP_SESSION_RD_ATTR(max_outstanding_r2t, ISCSI_MAX_R2T); - SETUP_SESSION_RD_ATTR(immediate_data, ISCSI_IMM_DATA_EN); - SETUP_SESSION_RD_ATTR(first_burst_len, ISCSI_FIRST_BURST); - SETUP_SESSION_RD_ATTR(max_burst_len, ISCSI_MAX_BURST); - SETUP_SESSION_RD_ATTR(data_pdu_in_order, ISCSI_PDU_INORDER_EN); - SETUP_SESSION_RD_ATTR(data_seq_in_order, ISCSI_DATASEQ_INORDER_EN); - SETUP_SESSION_RD_ATTR(erl, ISCSI_ERL); - SETUP_SESSION_RD_ATTR(targetname, ISCSI_TARGET_NAME); - SETUP_SESSION_RD_ATTR(tpgt, ISCSI_TPGT); - SETUP_SESSION_RD_ATTR(password, ISCSI_USERNAME); - SETUP_SESSION_RD_ATTR(password_in, ISCSI_USERNAME_IN); - SETUP_SESSION_RD_ATTR(username, ISCSI_PASSWORD); - SETUP_SESSION_RD_ATTR(username_in, ISCSI_PASSWORD_IN); - SETUP_SESSION_RD_ATTR(fast_abort, ISCSI_FAST_ABORT); - SETUP_SESSION_RD_ATTR(abort_tmo, ISCSI_ABORT_TMO); - SETUP_SESSION_RD_ATTR(lu_reset_tmo,ISCSI_LU_RESET_TMO); - SETUP_SESSION_RD_ATTR(tgt_reset_tmo,ISCSI_TGT_RESET_TMO); - SETUP_SESSION_RD_ATTR(ifacename, ISCSI_IFACE_NAME); - SETUP_SESSION_RD_ATTR(initiatorname, ISCSI_INITIATOR_NAME); - SETUP_SESSION_RD_ATTR(targetalias, ISCSI_TARGET_ALIAS); - SETUP_PRIV_SESSION_RW_ATTR(recovery_tmo); - SETUP_PRIV_SESSION_RD_ATTR(state); - - BUG_ON(count > ISCSI_SESSION_ATTRS); - priv->session_attrs[count] = NULL; - spin_lock_irqsave(&iscsi_transport_lock, flags); list_add(&priv->list, &iscsi_transports); spin_unlock_irqrestore(&iscsi_transport_lock, flags); @@ -2210,10 +2673,14 @@ static __init int iscsi_transport_init(void) if (err) goto unregister_transport_class; - err = transport_class_register(&iscsi_host_class); + err = class_register(&iscsi_iface_class); if (err) goto unregister_endpoint_class; + err = transport_class_register(&iscsi_host_class); + if (err) + goto unregister_iface_class; + err = transport_class_register(&iscsi_connection_class); if (err) goto unregister_host_class; @@ -2243,6 +2710,8 @@ unregister_conn_class: transport_class_unregister(&iscsi_connection_class); unregister_host_class: transport_class_unregister(&iscsi_host_class); +unregister_iface_class: + class_unregister(&iscsi_iface_class); unregister_endpoint_class: class_unregister(&iscsi_endpoint_class); unregister_transport_class: @@ -2258,6 +2727,7 @@ static void __exit iscsi_transport_exit(void) transport_class_unregister(&iscsi_session_class); transport_class_unregister(&iscsi_host_class); class_unregister(&iscsi_endpoint_class); + class_unregister(&iscsi_iface_class); class_unregister(&iscsi_transport_class); } diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index c6fcf76cade5..9d9330ae4213 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -1545,8 +1545,14 @@ int sas_rphy_add(struct sas_rphy *rphy) if (identify->device_type == SAS_END_DEVICE && rphy->scsi_target_id != -1) { - scsi_scan_target(&rphy->dev, 0, - rphy->scsi_target_id, SCAN_WILD_CARD, 0); + int lun; + + if (identify->target_port_protocols & SAS_PROTOCOL_SSP) + lun = SCAN_WILD_CARD; + else + lun = 0; + + scsi_scan_target(&rphy->dev, 0, rphy->scsi_target_id, lun, 0); } return 0; diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 953773cb26d9..a7942e5c8be8 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -1066,12 +1066,13 @@ static int sd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct gendisk *disk = bdev->bd_disk; - struct scsi_device *sdp = 
scsi_disk(disk)->device; + struct scsi_disk *sdkp = scsi_disk(disk); + struct scsi_device *sdp = sdkp->device; void __user *p = (void __user *)arg; int error; - SCSI_LOG_IOCTL(1, printk("sd_ioctl: disk=%s, cmd=0x%x\n", - disk->disk_name, cmd)); + SCSI_LOG_IOCTL(1, sd_printk(KERN_INFO, sdkp, "sd_ioctl: disk=%s, " + "cmd=0x%x\n", disk->disk_name, cmd)); /* * If we are in the middle of error recovery, don't let anyone
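The store_host_reset() hunk in scsi_sysfs.c and the qla4xxx_host_reset() implementation earlier in this diff are two halves of one new interface: writing the keyword adapter or firmware to the host_reset attribute routes through the driver's hostt->host_reset() with SCSI_ADAPTER_RESET or SCSI_FIRMWARE_RESET, and a zero return is reported back to the writer as success. A hedged sketch of what a driver-side hook looks like under the new template field; the my_* names are placeholders and not part of any driver in this diff:

#include <linux/module.h>
#include <scsi/scsi_host.h>

/* Placeholder hardware helpers so the sketch stands alone. */
static int my_hw_full_reset(struct Scsi_Host *shost) { return 0; }
static int my_hw_fw_reset(struct Scsi_Host *shost) { return 0; }

static int my_host_reset(struct Scsi_Host *shost, int reset_type)
{
	switch (reset_type) {
	case SCSI_ADAPTER_RESET:	/* "adapter" was written */
		return my_hw_full_reset(shost);
	case SCSI_FIRMWARE_RESET:	/* "firmware" was written */
		return my_hw_fw_reset(shost);
	default:
		return -EINVAL;
	}
}

static struct scsi_host_template my_sht = {
	.module		= THIS_MODULE,
	.name		= "my_hba",
	.host_reset	= my_host_reset,
	/* queuecommand and the other mandatory hooks elided */
};

From userspace the trigger is then, for example, echo "adapter" > /sys/class/scsi_host/host0/host_reset (host number assumed); the write fails with -EINVAL when the keyword is unknown or the host template provides no host_reset hook, exactly as store_host_reset() above is coded.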