Diffstat (limited to 'drivers/scsi/mpt3sas/mpt3sas_scsih.c')
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c  421
1 files changed, 337 insertions, 84 deletions
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 717ba0845a2a..c597d544eb39 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -51,7 +51,6 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
-#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
@@ -155,6 +154,10 @@ static int prot_mask = -1;
module_param(prot_mask, int, 0444);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+static bool enable_sdev_max_qd;
+module_param(enable_sdev_max_qd, bool, 0444);
+MODULE_PARM_DESC(enable_sdev_max_qd,
+ "Enable sdev max qd as can_queue, def=disabled(0)");
/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
@@ -1047,6 +1050,34 @@ mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * _scsih_set_nvme_max_shutdown_latency - Update max_shutdown_latency.
+ * @ioc: per adapter object
+ * Context: This function will acquire ioc->pcie_device_lock
+ *
+ * Update ioc->max_shutdown_latency to the highest RTD3 Entry Latency
+ * reported among all available NVMe drives.
+ * Minimum max_shutdown_latency will be six seconds.
+ */
+static void
+_scsih_set_nvme_max_shutdown_latency(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _pcie_device *pcie_device;
+ unsigned long flags;
+ u16 shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+
+ spin_lock_irqsave(&ioc->pcie_device_lock, flags);
+ list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
+ if (pcie_device->shutdown_latency) {
+ if (shutdown_latency < pcie_device->shutdown_latency)
+ shutdown_latency =
+ pcie_device->shutdown_latency;
+ }
+ }
+ ioc->max_shutdown_latency = shutdown_latency;
+ spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+}
+
+/**
* _scsih_pcie_device_remove - remove pcie_device from list.
* @ioc: per adapter object
* @pcie_device: the pcie_device object
@@ -1060,6 +1091,7 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
{
unsigned long flags;
int was_on_pcie_device_list = 0;
+ u8 update_latency = 0;
if (!pcie_device)
return;
@@ -1079,11 +1111,21 @@ _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
list_del_init(&pcie_device->list);
was_on_pcie_device_list = 1;
}
+ if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+ update_latency = 1;
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
if (was_on_pcie_device_list) {
kfree(pcie_device->serial_number);
pcie_device_put(pcie_device);
}
+
+ /*
+ * This device's RTD3 Entry Latency matches IOC's
+ * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+ * from the available drives, since the current drive is being removed.
+ */
+ if (update_latency)
+ _scsih_set_nvme_max_shutdown_latency(ioc);
}
@@ -1098,6 +1140,7 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
struct _pcie_device *pcie_device;
unsigned long flags;
int was_on_pcie_device_list = 0;
+ u8 update_latency = 0;
if (ioc->shost_recovery)
return;
@@ -1110,12 +1153,22 @@ _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
was_on_pcie_device_list = 1;
pcie_device_put(pcie_device);
}
+ if (pcie_device->shutdown_latency == ioc->max_shutdown_latency)
+ update_latency = 1;
}
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
if (was_on_pcie_device_list) {
_scsih_pcie_device_remove_from_sml(ioc, pcie_device);
pcie_device_put(pcie_device);
}
+
+ /*
+ * This device's RTD3 Entry Latency matches IOC's
+ * max_shutdown_latency. Recalculate IOC's max_shutdown_latency
+ * from the available drives, since the current drive is being removed.
+ */
+ if (update_latency)
+ _scsih_set_nvme_max_shutdown_latency(ioc);
}
/**
@@ -1152,6 +1205,11 @@ _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
+ if (pcie_device->access_status ==
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
+ clear_bit(pcie_device->handle, ioc->pend_os_device_add);
+ return;
+ }
if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
_scsih_pcie_device_remove(ioc, pcie_device);
} else if (!pcie_device->starget) {
@@ -1196,7 +1254,9 @@ _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
spin_lock_irqsave(&ioc->pcie_device_lock, flags);
pcie_device_get(pcie_device);
list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
- _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
+ if (pcie_device->access_status !=
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED)
+ _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
}
/**
@@ -1433,17 +1493,20 @@ _scsih_is_end_device(u32 device_info)
}
/**
- * _scsih_is_nvme_device - determines if device is an nvme device
+ * _scsih_is_nvme_pciescsi_device - determines if
+ * device is a PCIe NVMe/SCSI device
* @device_info: bitfield providing information about the device.
* Context: none
*
- * Return: 1 if nvme device.
+ * Return: 1 if the device is a PCIe NVMe or SCSI device.
*/
static int
-_scsih_is_nvme_device(u32 device_info)
+_scsih_is_nvme_pciescsi_device(u32 device_info)
{
- if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
- == MPI26_PCIE_DEVINFO_NVME)
+ if (((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+ == MPI26_PCIE_DEVINFO_NVME) ||
+ ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
+ == MPI26_PCIE_DEVINFO_SCSI))
return 1;
else
return 0;
@@ -1509,7 +1572,13 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
max_depth = shost->can_queue;
- /* limit max device queue for SATA to 32 */
+ /*
+ * limit max device queue for SATA to 32 if enable_sdev_max_qd
+ * is disabled.
+ */
+ if (ioc->enable_sdev_max_qd)
+ goto not_sata;
+
sas_device_priv_data = sdev->hostdata;
if (!sas_device_priv_data)
goto not_sata;
@@ -1535,7 +1604,31 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
max_depth = 1;
if (qdepth > max_depth)
qdepth = max_depth;
- return scsi_change_queue_depth(sdev, qdepth);
+ scsi_change_queue_depth(sdev, qdepth);
+ sdev_printk(KERN_INFO, sdev,
+ "qdepth(%d), tagged(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported,
+ sdev->scsi_level, ((sdev->inquiry[7] & 2) >> 1));
+ return sdev->queue_depth;
+}
+
+/**
+ * mpt3sas_scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if (ioc->enable_sdev_max_qd)
+ qdepth = shost->can_queue;
+
+ scsih_change_queue_depth(sdev, qdepth);
}
/**
@@ -2296,7 +2389,7 @@ scsih_slave_configure(struct scsi_device *sdev)
MPT3SAS_RAID_MAX_SECTORS);
}
- scsih_change_queue_depth(sdev, qdepth);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
/* raid transport support */
if (!ioc->is_warpdrive)
@@ -2360,7 +2453,7 @@ scsih_slave_configure(struct scsi_device *sdev)
pcie_device_put(pcie_device);
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- scsih_change_queue_depth(sdev, qdepth);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
/* Enable QUEUE_FLAG_NOMERGES flag, so that IOs won't be
** merged and can eliminate holes created during merging
** operation.
@@ -2420,7 +2513,7 @@ scsih_slave_configure(struct scsi_device *sdev)
_scsih_display_sata_capabilities(ioc, handle, sdev);
- scsih_change_queue_depth(sdev, qdepth);
+ mpt3sas_scsih_change_queue_depth(sdev, qdepth);
if (ssp_target) {
sas_read_port_mode_page(sdev);
@@ -2635,6 +2728,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
u16 smid = 0;
u32 ioc_state;
int rc;
+ u8 issue_reset = 0;
lockdep_assert_held(&ioc->tm_cmds.mutex);
@@ -2657,7 +2751,13 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
}
if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
- mpt3sas_base_fault_info(ioc, ioc_state &
+ mpt3sas_print_fault_code(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
+ return (!rc) ? SUCCESS : FAILED;
+ } else if ((ioc_state & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc, ioc_state &
MPI2_DOORBELL_DATA_MASK);
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
return (!rc) ? SUCCESS : FAILED;
@@ -2688,9 +2788,10 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
ioc->put_smid_hi_priority(ioc, smid, msix_task);
wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
- if (mpt3sas_base_check_cmd_timeout(ioc,
- ioc->tm_cmds.status, mpi_request,
- sizeof(Mpi2SCSITaskManagementRequest_t)/4)) {
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->tm_cmds.status, mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4, issue_reset);
+ if (issue_reset) {
rc = mpt3sas_base_hard_reset_handler(ioc,
FORCE_BIG_HAMMER);
rc = (!rc) ? SUCCESS : FAILED;
@@ -2837,15 +2938,17 @@ scsih_abort(struct scsi_cmnd *scmd)
u8 timeout = 30;
struct _pcie_device *pcie_device = NULL;
- sdev_printk(KERN_INFO, scmd->device,
- "attempting task abort! scmd(%p)\n", scmd);
+ sdev_printk(KERN_INFO, scmd->device, "attempting task abort! "
+ "scmd(0x%p), outstanding for %u ms & timeout %u ms\n",
+ scmd, jiffies_to_msecs(jiffies - scmd->jiffies_at_alloc),
+ (scmd->request->timeout / HZ) * 1000);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
- "device been deleted! scmd(%p)\n", scmd);
+ "device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -2854,6 +2957,8 @@ scsih_abort(struct scsi_cmnd *scmd)
/* check for completed command */
if (st == NULL || st->cb_idx == 0xFF) {
+ sdev_printk(KERN_INFO, scmd->device, "No reference found at "
+ "driver, assuming scmd(0x%p) might have completed\n", scmd);
scmd->result = DID_RESET << 16;
r = SUCCESS;
goto out;
@@ -2872,7 +2977,8 @@ scsih_abort(struct scsi_cmnd *scmd)
handle = sas_device_priv_data->sas_target->handle;
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
- if (pcie_device && (!ioc->tm_custom_handling))
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
timeout = ioc->nvme_abort_timeout;
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
@@ -2881,7 +2987,7 @@ scsih_abort(struct scsi_cmnd *scmd)
if (r == SUCCESS && st->cb_idx != 0xFF)
r = FAILED;
out:
- sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (pcie_device)
pcie_device_put(pcie_device);
@@ -2910,14 +3016,14 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
sdev_printk(KERN_INFO, scmd->device,
- "attempting device reset! scmd(%p)\n", scmd);
+ "attempting device reset! scmd(0x%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
sdev_printk(KERN_INFO, scmd->device,
- "device been deleted! scmd(%p)\n", scmd);
+ "device been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -2943,11 +3049,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
- if (pcie_device && (!ioc->tm_custom_handling)) {
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
tr_timeout = pcie_device->reset_timeout;
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
+
r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
tr_timeout, tr_method);
@@ -2955,7 +3063,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
r = FAILED;
out:
- sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (sas_device)
@@ -2986,15 +3094,15 @@ scsih_target_reset(struct scsi_cmnd *scmd)
struct scsi_target *starget = scmd->device->sdev_target;
struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
- starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
- scmd);
+ starget_printk(KERN_INFO, starget,
+ "attempting target reset! scmd(0x%p)\n", scmd);
_scsih_tm_display_info(ioc, scmd);
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
ioc->remove_host) {
- starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
- scmd);
+ starget_printk(KERN_INFO, starget,
+ "target been deleted! scmd(0x%p)\n", scmd);
scmd->result = DID_NO_CONNECT << 16;
scmd->scsi_done(scmd);
r = SUCCESS;
@@ -3020,7 +3128,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
- if (pcie_device && (!ioc->tm_custom_handling)) {
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info)))) {
tr_timeout = pcie_device->reset_timeout;
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
@@ -3032,7 +3141,7 @@ scsih_target_reset(struct scsi_cmnd *scmd)
if (r == SUCCESS && atomic_read(&starget->target_busy))
r = FAILED;
out:
- starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(0x%p)\n",
((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
if (sas_device)
@@ -3055,7 +3164,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
int r, retval;
- ioc_info(ioc, "attempting host reset! scmd(%p)\n", scmd);
+ ioc_info(ioc, "attempting host reset! scmd(0x%p)\n", scmd);
scsi_print_command(scmd);
if (ioc->is_driver_loading || ioc->remove_host) {
@@ -3067,7 +3176,7 @@ scsih_host_reset(struct scsi_cmnd *scmd)
retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
r = (retval < 0) ? FAILED : SUCCESS;
out:
- ioc_info(ioc, "host reset: %s scmd(%p)\n",
+ ioc_info(ioc, "host reset: %s scmd(0x%p)\n",
r == SUCCESS ? "SUCCESS" : "FAILED", scmd);
return r;
@@ -3598,7 +3707,9 @@ _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
sas_address = pcie_device->wwid;
}
spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
- if (pcie_device && (!ioc->tm_custom_handling))
+ if (pcie_device && (!ioc->tm_custom_handling) &&
+ (!(mpt3sas_scsih_is_pcie_scsi_device(
+ pcie_device->device_info))))
tr_method =
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
else
@@ -4431,6 +4542,7 @@ static void
_scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
Mpi2EventDataTemperature_t *event_data)
{
+ u32 doorbell;
if (ioc->temp_sensors_count >= event_data->SensorNum) {
ioc_err(ioc, "Temperature Threshold flags %s%s%s%s exceeded for Sensor: %d !!!\n",
le16_to_cpu(event_data->Status) & 0x1 ? "0 " : " ",
@@ -4440,6 +4552,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
event_data->SensorNum);
ioc_err(ioc, "Current Temp In Celsius: %d\n",
event_data->CurrentTemperature);
+ if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+ mpt3sas_print_fault_code(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ } else if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_COREDUMP) {
+ mpt3sas_print_coredump_info(ioc,
+ doorbell & MPI2_DOORBELL_DATA_MASK);
+ }
+ }
}
}
@@ -4654,11 +4778,8 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
* since we're lockless at this point
*/
do {
- if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
- scmd->result = SAM_STAT_BUSY;
- scmd->scsi_done(scmd);
- return 0;
- }
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending))
+ return SCSI_MLQUEUE_DEVICE_BUSY;
} while (_scsih_set_satl_pending(scmd, true));
if (scmd->sc_data_direction == DMA_FROM_DEVICE)
@@ -5120,7 +5241,7 @@ _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
- event_reply = kzalloc(sz, GFP_KERNEL);
+ event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
__FILE__, __LINE__, __func__);
@@ -6456,24 +6577,17 @@ _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
/**
* _scsih_sas_device_status_change_event - handle device status change
* @ioc: per adapter object
- * @fw_event: The fw_event_work object
+ * @event_data: The fw event
* Context: user.
*/
static void
_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
- struct fw_event_work *fw_event)
+ Mpi2EventDataSasDeviceStatusChange_t *event_data)
{
struct MPT3SAS_TARGET *target_priv_data;
struct _sas_device *sas_device;
u64 sas_address;
unsigned long flags;
- Mpi2EventDataSasDeviceStatusChange_t *event_data =
- (Mpi2EventDataSasDeviceStatusChange_t *)
- fw_event->event_data;
-
- if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
- _scsih_sas_device_status_change_event_debug(ioc,
- event_data);
/* In MPI Revision K (0xC), the internal device reset complete was
* implemented, so avoid setting tm_busy flag for older firmware.
@@ -6505,6 +6619,12 @@ _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
else
target_priv_data->tm_busy = 0;
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ ioc_info(ioc,
+ "%s tm_busy flag for handle(0x%04x)\n",
+ (target_priv_data->tm_busy == 1) ? "Enable" : "Disable",
+ target_priv_data->handle);
+
out:
if (sas_device)
sas_device_put(sas_device);
@@ -6539,6 +6659,11 @@ _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
break;
case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
desc = "PCIe device blocked";
+ ioc_info(ioc,
+ "Device with Access Status (%s): wwid(0x%016llx), "
+ "handle(0x%04x)\n ll only be added to the internal list",
+ desc, (u64)wwid, handle);
+ rc = 0;
break;
case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
desc = "PCIe device mem space access failed";
@@ -6643,7 +6768,8 @@ _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
pcie_device->enclosure_level,
pcie_device->connector_name);
- if (pcie_device->starget)
+ if (pcie_device->starget && (pcie_device->access_status !=
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED))
scsi_remove_target(&pcie_device->starget->dev);
dewtprintk(ioc,
ioc_info(ioc, "%s: exit: handle(0x%04x), wwid(0x%016llx)\n",
@@ -6694,7 +6820,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
/* check if this is end device */
device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
- if (!(_scsih_is_nvme_device(device_info)))
+ if (!(_scsih_is_nvme_pciescsi_device(device_info)))
return;
wwid = le64_to_cpu(pcie_device_pg0.WWID);
@@ -6709,6 +6835,7 @@ _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
if (unlikely(pcie_device->handle != handle)) {
starget = pcie_device->starget;
sas_target_priv_data = starget->hostdata;
+ pcie_device->access_status = pcie_device_pg0.AccessStatus;
starget_printk(KERN_INFO, starget,
"handle changed from(0x%04x) to (0x%04x)!!!\n",
pcie_device->handle, handle);
@@ -6803,7 +6930,8 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device_pg0.AccessStatus))
return 0;
- if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
+ if (!(_scsih_is_nvme_pciescsi_device(le32_to_cpu
+ (pcie_device_pg0.DeviceInfo))))
return 0;
pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
@@ -6813,6 +6941,31 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
return 0;
}
+ /* PCIe Device Page 2 contains read-only information about a
+ * specific NVMe device; therefore, this page is only
+ * valid for NVMe devices and is skipped for PCIe devices of type SCSI.
+ */
+ if (!(mpt3sas_scsih_is_pcie_scsi_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+ if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
+ &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle)) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 0;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ ioc_err(ioc,
+ "failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 0;
+ }
+ }
+
pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
if (!pcie_device) {
ioc_err(ioc, "failure at %s:%d/%s()!\n",
@@ -6824,6 +6977,7 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
pcie_device->id = ioc->pcie_target_id++;
pcie_device->channel = PCIE_CHANNEL;
pcie_device->handle = handle;
+ pcie_device->access_status = pcie_device_pg0.AccessStatus;
pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
pcie_device->wwid = wwid;
pcie_device->port_num = pcie_device_pg0.PortNum;
@@ -6855,27 +7009,26 @@ _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
}
/* TODO -- Add device name once FW supports it */
- if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
- &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
- ioc_err(ioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- kfree(pcie_device);
- return 0;
- }
-
- ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
- if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
- ioc_err(ioc, "failure at %s:%d/%s()!\n",
- __FILE__, __LINE__, __func__);
- kfree(pcie_device);
- return 0;
- }
- pcie_device->nvme_mdts =
- le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
- if (pcie_device_pg2.ControllerResetTO)
- pcie_device->reset_timeout =
- pcie_device_pg2.ControllerResetTO;
- else
+ if (!(mpt3sas_scsih_is_pcie_scsi_device(
+ le32_to_cpu(pcie_device_pg0.DeviceInfo)))) {
+ pcie_device->nvme_mdts =
+ le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
+ pcie_device->shutdown_latency =
+ le16_to_cpu(pcie_device_pg2.ShutdownLatency);
+ /*
+ * Set IOC's max_shutdown_latency to drive's RTD3 Entry Latency
+ * if drive's RTD3 Entry Latency is greater than IOC's
+ * max_shutdown_latency.
+ */
+ if (pcie_device->shutdown_latency > ioc->max_shutdown_latency)
+ ioc->max_shutdown_latency =
+ pcie_device->shutdown_latency;
+ if (pcie_device_pg2.ControllerResetTO)
+ pcie_device->reset_timeout =
+ pcie_device_pg2.ControllerResetTO;
+ else
+ pcie_device->reset_timeout = 30;
+ } else
pcie_device->reset_timeout = 30;
if (ioc->wait_for_discovery_to_complete)
@@ -7606,10 +7759,9 @@ _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
- issue_reset =
- mpt3sas_base_check_cmd_timeout(ioc,
- ioc->scsih_cmds.status, mpi_request,
- sizeof(Mpi2RaidActionRequest_t)/4);
+ mpt3sas_check_cmd_timeout(ioc,
+ ioc->scsih_cmds.status, mpi_request,
+ sizeof(Mpi2RaidActionRequest_t)/4, issue_reset);
rc = -EFAULT;
goto out;
}
@@ -8507,6 +8659,8 @@ _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
&& (pcie_device->slot == le16_to_cpu(
pcie_device_pg0->Slot))) {
+ pcie_device->access_status =
+ pcie_device_pg0->AccessStatus;
pcie_device->responding = 1;
starget = pcie_device->starget;
if (starget && starget->hostdata) {
@@ -8594,7 +8748,7 @@ _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
- if (!(_scsih_is_nvme_device(device_info)))
+ if (!(_scsih_is_nvme_pciescsi_device(device_info)))
continue;
_scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
}
@@ -9175,7 +9329,7 @@ _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
break;
}
handle = le16_to_cpu(pcie_device_pg0.DevHandle);
- if (!(_scsih_is_nvme_device(
+ if (!(_scsih_is_nvme_pciescsi_device(
le32_to_cpu(pcie_device_pg0.DeviceInfo))))
continue;
pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
@@ -9207,15 +9361,17 @@ void mpt3sas_scsih_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * mpt3sas_scsih_after_reset_handler - reset callback handler (for scsih)
+ * mpt3sas_scsih_clear_outstanding_scsi_tm_commands - clears outstanding
+ * scsi & tm cmds.
* @ioc: per adapter object
*
* The handler for doing any required cleanup or initialization.
*/
void
-mpt3sas_scsih_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_scsih_clear_outstanding_scsi_tm_commands(struct MPT3SAS_ADAPTER *ioc)
{
- dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
+ dtmprintk(ioc,
+ ioc_info(ioc, "%s: clear outstanding scsi & tm cmds\n", __func__));
if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
ioc->scsih_cmds.status |= MPT3_CMD_RESET;
mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
@@ -9292,6 +9448,7 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
}
_scsih_remove_unresponding_devices(ioc);
_scsih_scan_for_devices_after_reset(ioc);
+ _scsih_set_nvme_max_shutdown_latency(ioc);
break;
case MPT3SAS_PORT_ENABLE_COMPLETE:
ioc->start_scan = 0;
@@ -9308,7 +9465,10 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
_scsih_sas_topology_change_event(ioc, fw_event);
break;
case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
- _scsih_sas_device_status_change_event(ioc, fw_event);
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_device_status_change_event_debug(ioc,
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ fw_event->event_data);
break;
case MPI2_EVENT_SAS_DISCOVERY:
_scsih_sas_discovery_event(ioc, fw_event);
@@ -9481,6 +9641,10 @@ mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
break;
}
case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ _scsih_sas_device_status_change_event(ioc,
+ (Mpi2EventDataSasDeviceStatusChange_t *)
+ mpi_reply->EventData);
+ break;
case MPI2_EVENT_IR_OPERATION_STATUS:
case MPI2_EVENT_SAS_DISCOVERY:
case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
@@ -9588,6 +9752,75 @@ _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
}
/**
+ * _scsih_nvme_shutdown - NVMe shutdown notification
+ * @ioc: per adapter object
+ *
+ * Send an IoUnitControl request with the shutdown operation code to alert the
+ * IOC that the host system is shutting down, so that the IOC can issue an NVMe
+ * shutdown to the NVMe drives attached to it.
+ */
+static void
+_scsih_nvme_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi26IoUnitControlRequest_t *mpi_request;
+ Mpi26IoUnitControlReply_t *mpi_reply;
+ u16 smid;
+
+ /* are there any NVMe devices ? */
+ if (list_empty(&ioc->pcie_device_list))
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ ioc_err(ioc, "%s: scsih_cmd in use\n", __func__);
+ goto out;
+ }
+
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ ioc_err(ioc,
+ "%s: failed obtaining a smid\n", __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi26IoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI26_CTRL_OP_SHUTDOWN;
+
+ init_completion(&ioc->scsih_cmds.done);
+ ioc->put_smid_default(ioc, smid);
+ /* Wait for max_shutdown_latency seconds */
+ ioc_info(ioc,
+ "Io Unit Control shutdown (sending), Shutdown latency %d sec\n",
+ ioc->max_shutdown_latency);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done,
+ ioc->max_shutdown_latency*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ ioc_err(ioc, "%s: timeout\n", __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_info(ioc, "Io Unit Control shutdown (complete):"
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+
+/**
* _scsih_ir_shutdown - IR shutdown notification
* @ioc: per adapter object
*
@@ -9779,6 +10012,7 @@ scsih_shutdown(struct pci_dev *pdev)
&ioc->ioc_pg1_copy);
_scsih_ir_shutdown(ioc);
+ _scsih_nvme_shutdown(ioc);
mpt3sas_base_detach(ioc);
}
@@ -10039,6 +10273,12 @@ _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
pcie_device_put(pcie_device);
continue;
}
+ if (pcie_device->access_status ==
+ MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED) {
+ pcie_device_make_active(ioc, pcie_device);
+ pcie_device_put(pcie_device);
+ continue;
+ }
rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
pcie_device->id, 0);
if (rc) {
@@ -10115,6 +10355,8 @@ scsih_scan_start(struct Scsi_Host *shost)
int rc;
if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+ else if (ioc->manu_pg11.HostTraceBufferMaxSizeKB != 0)
+ mpt3sas_enable_diag_buffer(ioc, 1);
if (disable_discovery > 0)
return;
@@ -10453,6 +10695,15 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
ioc->logging_level = logging_level;
ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* Host waits for minimum of six seconds */
+ ioc->max_shutdown_latency = IO_UNIT_CONTROL_SHUTDOWN_TIMEOUT;
+ /*
+ * Enable MEMORY MOVE support flag.
+ */
+ ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
+
+ ioc->enable_sdev_max_qd = enable_sdev_max_qd;
+
/* misc semaphores and spin locks */
mutex_init(&ioc->reset_in_progress_mutex);
/* initializing pci_access_mutex lock */
@@ -10594,6 +10845,7 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
mpt3sas_base_stop_watchdog(ioc);
flush_scheduled_work();
scsi_block_requests(shost);
+ _scsih_nvme_shutdown(ioc);
device_state = pci_choose_state(pdev, state);
ioc_info(ioc, "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
@@ -10628,7 +10880,7 @@ scsih_resume(struct pci_dev *pdev)
r = mpt3sas_base_map_resources(ioc);
if (r)
return r;
-
+ ioc_info(ioc, "Issuing Hard Reset as part of OS Resume\n");
mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
scsi_unblock_requests(shost);
mpt3sas_base_start_watchdog(ioc);
@@ -10697,6 +10949,7 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
if (rc)
return PCI_ERS_RESULT_DISCONNECT;
+ ioc_info(ioc, "Issuing Hard Reset as part of PCI Slot Reset\n");
rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
ioc_warn(ioc, "hard reset: %s\n",