Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--  drivers/scsi/ipr.c  319
1 files changed, 294 insertions, 25 deletions
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index d9afc51af7d3..882744852aac 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -99,6 +99,7 @@ static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 2;
+static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@@ -221,6 +222,8 @@ MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:2)");
+module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
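
The new fast_reboot flag is a plain module_param_named() with mode S_IRUGO | S_IWUSR, so besides being set at load time it can be flipped while the driver is running. A minimal userspace sketch, assuming the driver is loaded as "ipr" and the parameter therefore appears at /sys/module/ipr/parameters/fast_reboot (that path is an assumption based on the usual module_param_named() behaviour, not something stated in the patch):

/* Toggle the ipr fast_reboot module parameter from userspace. */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/module/ipr/parameters/fast_reboot";
	const char *val = (argc > 1) ? argv[1] : "1";	/* "1" skips adapter shutdown on reboot */
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return EXIT_FAILURE;
	}
	fprintf(f, "%s\n", val);
	return fclose(f) ? EXIT_FAILURE : EXIT_SUCCESS;
}

As the ipr_shutdown() hunk further down shows, the flag only changes behaviour on SIS-64 adapters and only when the system is restarting.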
@@ -495,6 +498,10 @@ struct ipr_error_table_t ipr_error_table[] = {
"4061: Multipath redundancy level got better"},
{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
"4060: Multipath redundancy level got worse"},
+ {0x06808100, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9083: Device raw mode enabled"},
+ {0x06808200, 0, IPR_DEFAULT_LOG_LEVEL,
+ "9084: Device raw mode disabled"},
{0x07270000, 0, 0,
"Failure due to other device"},
{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -1462,7 +1469,8 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
- if (ioasc != IPR_IOASC_IOA_WAS_RESET)
+ if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
+ ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
@@ -2566,7 +2574,8 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
ipr_handle_log_data(ioa_cfg, hostrcb);
if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
- } else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
+ } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
+ ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
dev_err(&ioa_cfg->pdev->dev,
"Host RCB failed with IOASC: 0x%08X\n", ioasc);
}
@@ -4491,11 +4500,83 @@ static struct device_attribute ipr_resource_type_attr = {
.show = ipr_show_resource_type
};
+/**
+ * ipr_show_raw_mode - Show the adapter's raw mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_raw_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res)
+ len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
+ else
+ len = -ENXIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+/**
+ * ipr_store_raw_mode - Change the adapter's raw mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * 	number of bytes consumed from the buffer
+ **/
+static ssize_t ipr_store_raw_mode(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ ssize_t len;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res) {
+ if (ioa_cfg->sis64 && ipr_is_af_dasd_device(res)) {
+ res->raw_mode = simple_strtoul(buf, NULL, 10);
+ len = strlen(buf);
+ if (res->sdev)
+ sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
+ res->raw_mode ? "enabled" : "disabled");
+ } else
+ len = -EINVAL;
+ } else
+ len = -ENXIO;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return len;
+}
+
+static struct device_attribute ipr_raw_mode_attr = {
+ .attr = {
+ .name = "raw_mode",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_raw_mode,
+ .store = ipr_store_raw_mode
+};
+
static struct device_attribute *ipr_dev_attrs[] = {
&ipr_adapter_handle_attr,
&ipr_resource_path_attr,
&ipr_device_id_attr,
&ipr_resource_type_attr,
+ &ipr_raw_mode_attr,
NULL,
};
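
Because ipr_raw_mode_attr is added to ipr_dev_attrs, the attribute is created per SCSI device rather than per host. A userspace sketch that reads and then enables it for one device; the H:C:T:L in the path below is a made-up example, and per ipr_store_raw_mode() the write only succeeds for an AF DASD on a SIS-64 adapter (anything else gets -EINVAL):

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical H:C:T:L; point this at an AF DASD behind an ipr adapter. */
	const char *attr = "/sys/class/scsi_device/0:0:1:0/device/raw_mode";
	char cur[8];
	ssize_t n;
	int fd;

	fd = open(attr, O_RDONLY);		/* show the current setting */
	if (fd >= 0) {
		n = read(fd, cur, sizeof(cur) - 1);
		if (n > 0) {
			cur[n] = '\0';
			printf("raw_mode is currently %s", cur);
		}
		close(fd);
	}

	fd = open(attr, O_WRONLY);		/* enable raw mode */
	if (fd < 0 || write(fd, "1", 1) != 1) {
		/* EINVAL here means the target is not an AF DASD on a SIS-64 adapter */
		fprintf(stderr, "enable failed: %s\n", strerror(errno));
		return 1;
	}
	close(fd);
	return 0;
}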
@@ -5379,9 +5460,6 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
/* Mask the interrupt */
writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
-
- /* Clear the interrupt */
- writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
list_del(&ioa_cfg->reset_cmd->queue);
@@ -6150,6 +6228,13 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
break;
case IPR_IOASC_NR_INIT_CMD_REQUIRED:
break;
+ case IPR_IOASC_IR_NON_OPTIMIZED:
+ if (res->raw_mode) {
+ res->raw_mode = 0;
+ scsi_cmd->result |= (DID_IMM_RETRY << 16);
+ } else
+ scsi_cmd->result |= (DID_ERROR << 16);
+ break;
default:
if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
scsi_cmd->result |= (DID_ERROR << 16);
@@ -6289,6 +6374,8 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
(!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
}
+ if (res->raw_mode && ipr_is_af_dasd_device(res))
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
@@ -6402,7 +6489,6 @@ static struct scsi_host_template driver_template = {
.shost_attrs = ipr_ioa_attrs,
.sdev_attrs = ipr_dev_attrs,
.proc_name = IPR_NAME,
- .no_write_same = 1,
.use_blk_tags = 1,
};
@@ -8318,7 +8404,6 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
{
ENTER;
- pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
ipr_cmd->job_step = ipr_reset_bist_done;
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
LEAVE;
@@ -8326,6 +8411,32 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_reset_work - Pulse a PCIe fundamental reset
+ * @work: work struct
+ *
+ * Description: This pulses a warm reset to the adapter's slot.
+ *
+ **/
+static void ipr_reset_reset_work(struct work_struct *work)
+{
+ struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct pci_dev *pdev = ioa_cfg->pdev;
+ unsigned long lock_flags = 0;
+
+ ENTER;
+ pci_set_pcie_reset_state(pdev, pcie_warm_reset);
+ msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
+ pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->reset_cmd == ipr_cmd)
+ ipr_reset_ioa_job(ipr_cmd);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+}
+
+/**
* ipr_reset_slot_reset - Reset the PCI slot of the adapter.
* @ipr_cmd: ipr command struct
*
@@ -8337,12 +8448,11 @@ static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- pci_set_pcie_reset_state(pdev, pcie_warm_reset);
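+ /* Pulse the warm reset from process context so msleep() can separate assert and deassert */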
+ INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
+ queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
ipr_cmd->job_step = ipr_reset_slot_reset_done;
- ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
LEAVE;
return IPR_RC_JOB_RETURN;
}
@@ -8480,6 +8590,122 @@ static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_reset_quiesce_done - Complete IOA disconnect
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Freeze the adapter to complete quiesce processing
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_ioa_bringdown_done;
+ ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
+ LEAVE;
+ return IPR_RC_JOB_CONTINUE;
+}
+
+/**
+ * ipr_reset_cancel_hcam_done - Check for outstanding commands
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Ensure nothing is outstanding to the IOA and
+ * proceed with IOA disconnect. Otherwise reset the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
+ **/
+static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ipr_cmnd *loop_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int rc = IPR_RC_JOB_CONTINUE;
+ int count = 0;
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_quiesce_done;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
+ count++;
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ rc = IPR_RC_JOB_RETURN;
+ break;
+ }
+ spin_unlock(&hrrq->_lock);
+
+ if (count)
+ break;
+ }
+
+ LEAVE;
+ return rc;
+}
+
+/**
+ * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
+ * @ipr_cmd: ipr command struct
+ *
+ * Description: Cancel any outstanding HCAMs to the IOA.
+ *
+ * Return value:
+ * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int rc = IPR_RC_JOB_CONTINUE;
+ struct ipr_cmd_pkt *cmd_pkt;
+ struct ipr_cmnd *hcam_cmd;
+ struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
+
+ ENTER;
+ ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
+
+ if (!hrrq->ioa_is_dead) {
+ if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
+ list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
+ continue;
+
+ ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
+ cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
+ cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
+ cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
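+ /* Encode the 64-bit IOARCB address of the HCAM: bits 63:32 in CDB bytes 10-13, bits 31:0 in bytes 2-5 */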
+ cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
+ cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
+ cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
+ cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
+ cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
+ cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
+ cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
+ cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_CANCEL_TIMEOUT);
+
+ rc = IPR_RC_JOB_RETURN;
+ ipr_cmd->job_step = ipr_reset_cancel_hcam;
+ break;
+ }
+ }
+ } else
+ ipr_cmd->job_step = ipr_reset_alert;
+
+ LEAVE;
+ return rc;
+}
+
+/**
* ipr_reset_ucode_download_done - Microcode download completion
* @ipr_cmd: ipr command struct
*
@@ -8561,7 +8787,9 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
int rc = IPR_RC_JOB_CONTINUE;
ENTER;
- if (shutdown_type != IPR_SHUTDOWN_NONE &&
+ if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
+ ipr_cmd->job_step = ipr_reset_cancel_hcam;
+ else if (shutdown_type != IPR_SHUTDOWN_NONE &&
!ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
@@ -8917,13 +9145,15 @@ static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
int i;
- for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
- if (ioa_cfg->ipr_cmnd_list[i])
- dma_pool_free(ioa_cfg->ipr_cmd_pool,
- ioa_cfg->ipr_cmnd_list[i],
- ioa_cfg->ipr_cmnd_list_dma[i]);
+ if (ioa_cfg->ipr_cmnd_list) {
+ for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
+ if (ioa_cfg->ipr_cmnd_list[i])
+ dma_pool_free(ioa_cfg->ipr_cmd_pool,
+ ioa_cfg->ipr_cmnd_list[i],
+ ioa_cfg->ipr_cmnd_list_dma[i]);
- ioa_cfg->ipr_cmnd_list[i] = NULL;
+ ioa_cfg->ipr_cmnd_list[i] = NULL;
+ }
}
if (ioa_cfg->ipr_cmd_pool)
@@ -8973,26 +9203,25 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
}
/**
- * ipr_free_all_resources - Free all allocated resources for an adapter.
- * @ipr_cmd: ipr command struct
+ * ipr_free_irqs - Free all allocated IRQs for the adapter.
+ * @ioa_cfg: ipr cfg struct
*
- * This function frees all allocated resources for the
+ * This function frees all allocated IRQs for the
* specified adapter.
*
* Return value:
* none
**/
-static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
+static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
- ENTER;
if (ioa_cfg->intr_flag == IPR_USE_MSI ||
ioa_cfg->intr_flag == IPR_USE_MSIX) {
int i;
for (i = 0; i < ioa_cfg->nvectors; i++)
free_irq(ioa_cfg->vectors_info[i].vec,
- &ioa_cfg->hrrq[i]);
+ &ioa_cfg->hrrq[i]);
} else
free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
@@ -9003,7 +9232,26 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
pci_disable_msix(pdev);
ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
}
+}
+/**
+ * ipr_free_all_resources - Free all allocated resources for an adapter.
+ * @ipr_cmd: ipr command struct
+ *
+ * This function frees all allocated resources for the
+ * specified adapter.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct pci_dev *pdev = ioa_cfg->pdev;
+
+ ENTER;
+ ipr_free_irqs(ioa_cfg);
+ if (ioa_cfg->reset_work_q)
+ destroy_workqueue(ioa_cfg->reset_work_q);
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
@@ -9823,6 +10071,14 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
(dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
ioa_cfg->needs_warm_reset = 1;
ioa_cfg->reset = ipr_reset_slot_reset;
+
+ ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
+ WQ_MEM_RECLAIM, host->host_no);
+
+ if (!ioa_cfg->reset_work_q) {
+ dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
+ goto out_free_irq;
+ }
} else
ioa_cfg->reset = ipr_reset_start_bist;
@@ -9834,6 +10090,8 @@ static int ipr_probe_ioa(struct pci_dev *pdev,
out:
return rc;
+out_free_irq:
+ ipr_free_irqs(ioa_cfg);
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
@@ -9914,6 +10172,8 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
flush_work(&ioa_cfg->work_q);
+ if (ioa_cfg->reset_work_q)
+ flush_workqueue(ioa_cfg->reset_work_q);
INIT_LIST_HEAD(&ioa_cfg->used_res_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -10036,6 +10296,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
unsigned long lock_flags = 0;
+ enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -10051,9 +10312,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
}
- ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
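+ /* On a fast reboot of a SIS-64 adapter, only quiesce the IOA instead of doing a full shutdown */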
+ if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
+ shutdown_type = IPR_SHUTDOWN_QUIESCE;
+
+ ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
+ ipr_free_irqs(ioa_cfg);
+ pci_disable_device(ioa_cfg->pdev);
+ }
}
static struct pci_device_id ipr_pci_table[] = {
@@ -10211,7 +10479,8 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+ (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
continue;
}