path: root/drivers/scsi/ipr.c
Diffstat (limited to 'drivers/scsi/ipr.c')
-rw-r--r--  drivers/scsi/ipr.c | 687
1 file changed, 667 insertions(+), 20 deletions(-)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 7ed4eef8347b..e1fe9494125b 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -70,6 +70,7 @@
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/libata.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
@@ -78,6 +79,7 @@
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_transport.h>
#include "ipr.h"
/*
@@ -199,6 +201,8 @@ struct ipr_error_table_t ipr_error_table[] = {
"FFFA: Undefined device response recovered by the IOA"},
{0x014A0000, 1, 1,
"FFF6: Device bus error, message or command phase"},
+ {0x014A8000, 0, 1,
+ "FFFE: Task Management Function failed"},
{0x015D0000, 0, 1,
"FFF6: Failure prediction threshold exceeded"},
{0x015D9200, 0, 1,
@@ -261,6 +265,8 @@ struct ipr_error_table_t ipr_error_table[] = {
"Device bus status error"},
{0x04448600, 0, 1,
"8157: IOA error requiring IOA reset to recover"},
+ {0x04448700, 0, 0,
+ "ATA device status error"},
{0x04490000, 0, 0,
"Message reject received from the device"},
{0x04449200, 0, 1,
@@ -273,6 +279,8 @@ struct ipr_error_table_t ipr_error_table[] = {
"9082: IOA detected device error"},
{0x044A0000, 1, 1,
"3110: Device bus error, message or command phase"},
+ {0x044A8000, 1, 1,
+ "3110: SAS Command / Task Management Function failed"},
{0x04670400, 0, 1,
"9091: Incorrect hardware configuration change has been detected"},
{0x04678000, 0, 1,
@@ -453,7 +461,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
- trace_entry->cmd_index = ipr_cmd->cmd_index;
+ trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
+ trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
}
@@ -480,8 +489,10 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
ioarcb->read_ioadl_len = 0;
ioasa->ioasc = 0;
ioasa->residual_data_len = 0;
+ ioasa->u.gata.status = 0;
ipr_cmd->scsi_cmd = NULL;
+ ipr_cmd->qc = NULL;
ipr_cmd->sense_buffer[0] = 0;
ipr_cmd->dma_use_sg = 0;
}
@@ -626,6 +637,28 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
}
/**
+ * ipr_sata_eh_done - done function for aborted SATA commands
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked for ops generated to SATA
+ * devices which are being aborted.
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ata_queued_cmd *qc = ipr_cmd->qc;
+ struct ipr_sata_port *sata_port = qc->ap->private_data;
+
+ qc->err_mask |= AC_ERR_OTHER;
+ sata_port->ioasa.status |= ATA_BUSY;
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ ata_qc_complete(qc);
+}
+
+/**
* ipr_scsi_eh_done - mid-layer done function for aborted ops
* @ipr_cmd: ipr command struct
*
@@ -669,6 +702,8 @@ static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
if (ipr_cmd->scsi_cmd)
ipr_cmd->done = ipr_scsi_eh_done;
+ else if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
del_timer(&ipr_cmd->timer);
@@ -825,6 +860,7 @@ static void ipr_init_res_entry(struct ipr_resource_entry *res)
res->del_from_ml = 0;
res->resetting_device = 0;
res->sdev = NULL;
+ res->sata_port = NULL;
}
/**
@@ -1316,7 +1352,7 @@ static u32 ipr_get_error(u32 ioasc)
int i;
for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
- if (ipr_error_table[i].ioasc == ioasc)
+ if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
return i;
return 0;
@@ -3051,6 +3087,17 @@ static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
**/
static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+
+ if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
+ qdepth = IPR_MAX_CMD_PER_ATA_LUN;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
return sdev->queue_depth;
}
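Aside: the new ipr_change_queue_depth body above simply caps the requested depth for SATA (GATA) resources. A minimal user-space sketch of that clamp, with the limit stubbed as a placeholder value rather than the driver's real IPR_MAX_CMD_PER_ATA_LUN from ipr.h:

#include <stdio.h>

/* Placeholder limit; the real driver uses IPR_MAX_CMD_PER_ATA_LUN from ipr.h. */
#define MAX_CMD_PER_ATA_LUN 6

/* Clamp a requested queue depth when the resource is an ATA (GATA) device. */
static int clamp_ata_qdepth(int is_gata, int qdepth)
{
        if (is_gata && qdepth > MAX_CMD_PER_ATA_LUN)
                qdepth = MAX_CMD_PER_ATA_LUN;
        return qdepth;
}

int main(void)
{
        printf("%d\n", clamp_ata_qdepth(1, 64));        /* prints 6  */
        printf("%d\n", clamp_ata_qdepth(0, 64));        /* prints 64 */
        return 0;
}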
@@ -3166,6 +3213,122 @@ static int ipr_biosparam(struct scsi_device *sdev,
}
/**
+ * ipr_find_starget - Find target based on bus/target.
+ * @starget: scsi target struct
+ *
+ * Return value:
+ * resource entry pointer if found / NULL if not found
+ **/
+static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+ struct ipr_resource_entry *res;
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if ((res->cfgte.res_addr.bus == starget->channel) &&
+ (res->cfgte.res_addr.target == starget->id) &&
+ (res->cfgte.res_addr.lun == 0)) {
+ return res;
+ }
+ }
+
+ return NULL;
+}
+
+static struct ata_port_info sata_port_info;
+
+/**
+ * ipr_target_alloc - Prepare for commands to a SCSI target
+ * @starget: scsi target struct
+ *
+ * If the device is a SATA device, this function allocates an
+ * ATA port with libata, else it does nothing.
+ *
+ * Return value:
+ * 0 on success / non-0 on failure
+ **/
+static int ipr_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+ struct ipr_sata_port *sata_port;
+ struct ata_port *ap;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = ipr_find_starget(starget);
+ starget->hostdata = NULL;
+
+ if (res && ipr_is_gata(res)) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
+ if (!sata_port)
+ return -ENOMEM;
+
+ ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
+ if (ap) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ sata_port->ioa_cfg = ioa_cfg;
+ sata_port->ap = ap;
+ sata_port->res = res;
+
+ res->sata_port = sata_port;
+ ap->private_data = sata_port;
+ starget->hostdata = sata_port;
+ } else {
+ kfree(sata_port);
+ return -ENOMEM;
+ }
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+
+ return 0;
+}
+
+/**
+ * ipr_target_destroy - Destroy a SCSI target
+ * @starget: scsi target struct
+ *
+ * If the device was a SATA device, this function frees the libata
+ * ATA port, else it does nothing.
+ *
+ **/
+static void ipr_target_destroy(struct scsi_target *starget)
+{
+ struct ipr_sata_port *sata_port = starget->hostdata;
+
+ if (sata_port) {
+ starget->hostdata = NULL;
+ ata_sas_port_destroy(sata_port->ap);
+ kfree(sata_port);
+ }
+}
+
+/**
+ * ipr_find_sdev - Find device based on bus/target/lun.
+ * @sdev: scsi device struct
+ *
+ * Return value:
+ * resource entry pointer if found / NULL if not found
+ **/
+static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
+{
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
+ struct ipr_resource_entry *res;
+
+ list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+ if ((res->cfgte.res_addr.bus == sdev->channel) &&
+ (res->cfgte.res_addr.target == sdev->id) &&
+ (res->cfgte.res_addr.lun == sdev->lun))
+ return res;
+ }
+
+ return NULL;
+}
+
+/**
* ipr_slave_destroy - Unconfigure a SCSI device
* @sdev: scsi device struct
*
@@ -3183,8 +3346,11 @@ static void ipr_slave_destroy(struct scsi_device *sdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
res = (struct ipr_resource_entry *) sdev->hostdata;
if (res) {
+ if (res->sata_port)
+ ata_port_disable(res->sata_port->ap);
sdev->hostdata = NULL;
res->sdev = NULL;
+ res->sata_port = NULL;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}
@@ -3219,13 +3385,45 @@ static int ipr_slave_configure(struct scsi_device *sdev)
}
if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
sdev->allow_restart = 1;
- scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+ if (ipr_is_gata(res) && res->sata_port) {
+ scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
+ ata_sas_slave_configure(sdev, res->sata_port->ap);
+ } else {
+ scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+ }
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return 0;
}
/**
+ * ipr_ata_slave_alloc - Prepare for commands to a SATA device
+ * @sdev: scsi device struct
+ *
+ * This function initializes an ATA port so that future commands
+ * sent through queuecommand will work.
+ *
+ * Return value:
+ * 0 on success
+ **/
+static int ipr_ata_slave_alloc(struct scsi_device *sdev)
+{
+ struct ipr_sata_port *sata_port = NULL;
+ int rc = -ENXIO;
+
+ ENTER;
+ if (sdev->sdev_target)
+ sata_port = sdev->sdev_target->hostdata;
+ if (sata_port)
+ rc = ata_sas_port_init(sata_port->ap);
+ if (rc)
+ ipr_slave_destroy(sdev);
+
+ LEAVE;
+ return rc;
+}
+
+/**
* ipr_slave_alloc - Prepare for commands to a device.
* @sdev: scsi device struct
*
@@ -3248,18 +3446,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
- if ((res->cfgte.res_addr.bus == sdev->channel) &&
- (res->cfgte.res_addr.target == sdev->id) &&
- (res->cfgte.res_addr.lun == sdev->lun)) {
- res->sdev = sdev;
- res->add_to_ml = 0;
- res->in_erp = 0;
- sdev->hostdata = res;
- if (!ipr_is_naca_model(res))
- res->needs_sync_complete = 1;
- rc = 0;
- break;
+ res = ipr_find_sdev(sdev);
+ if (res) {
+ res->sdev = sdev;
+ res->add_to_ml = 0;
+ res->in_erp = 0;
+ sdev->hostdata = res;
+ if (!ipr_is_naca_model(res))
+ res->needs_sync_complete = 1;
+ rc = 0;
+ if (ipr_is_gata(res)) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return ipr_ata_slave_alloc(sdev);
}
}
@@ -3314,7 +3512,8 @@ static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
* This function issues a device reset to the affected device.
* If the device is a SCSI device, a LUN reset will be sent
* to the device first. If that does not work, a target reset
- * will be sent.
+ * will be sent. If the device is a SATA device, a PHY reset will
+ * be sent.
*
* Return value:
* 0 on success / non-zero on failure
@@ -3325,26 +3524,79 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
struct ipr_cmd_pkt *cmd_pkt;
+ struct ipr_ioarcb_ata_regs *regs;
u32 ioasc;
ENTER;
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioarcb = &ipr_cmd->ioarcb;
cmd_pkt = &ioarcb->cmd_pkt;
+ regs = &ioarcb->add_data.u.regs;
ioarcb->res_handle = res->cfgte.res_handle;
cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
+ if (ipr_is_gata(res)) {
+ cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
+ ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
+ regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
+ }
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
+ memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
+ sizeof(struct ipr_ioasa_gata));
LEAVE;
return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
}
/**
+ * ipr_sata_reset - Reset the SATA port
+ * @ap: SATA port to reset
+ * @classes: class of the attached device
+ *
+ * This function issues a SATA phy reset to the affected ATA port.
+ *
+ * Return value:
+ * 0 on success / non-zero on failure
+ **/
+static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes)
+{
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_resource_entry *res;
+ unsigned long lock_flags = 0;
+ int rc = -ENXIO;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ res = sata_port->res;
+ if (res) {
+ rc = ipr_device_reset(ioa_cfg, res);
+ switch(res->cfgte.proto) {
+ case IPR_PROTO_SATA:
+ case IPR_PROTO_SAS_STP:
+ *classes = ATA_DEV_ATA;
+ break;
+ case IPR_PROTO_SATA_ATAPI:
+ case IPR_PROTO_SAS_STP_ATAPI:
+ *classes = ATA_DEV_ATAPI;
+ break;
+ default:
+ *classes = ATA_DEV_UNKNOWN;
+ break;
+ };
+ }
+
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
+ return rc;
+}
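Aside: the protocol-to-class switch above (and its twin later in ipr_ata_phy_reset) maps the IOA's resource protocol onto the device class libata expects back from a reset handler. The mapping on its own, as a standalone sketch with the enum values invented for illustration:

#include <stdio.h>

/* Illustrative stand-ins for the driver's IPR_PROTO_* and libata's ATA_DEV_* values. */
enum proto { PROTO_SATA, PROTO_SATA_ATAPI, PROTO_SAS_STP, PROTO_SAS_STP_ATAPI, PROTO_OTHER };
enum dev_class { DEV_UNKNOWN, DEV_ATA, DEV_ATAPI };

/* Map the adapter's notion of the attached protocol to an ATA device class. */
static enum dev_class proto_to_class(enum proto p)
{
        switch (p) {
        case PROTO_SATA:
        case PROTO_SAS_STP:
                return DEV_ATA;
        case PROTO_SATA_ATAPI:
        case PROTO_SAS_STP_ATAPI:
                return DEV_ATAPI;
        default:
                return DEV_UNKNOWN;
        }
}

int main(void)
{
        printf("%d\n", proto_to_class(PROTO_SAS_STP));        /* prints DEV_ATA (1) */
        return 0;
}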
+
+/**
* ipr_eh_dev_reset - Reset the device
* @scsi_cmd: scsi command struct
*
@@ -3360,7 +3612,8 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
struct ipr_cmnd *ipr_cmd;
struct ipr_ioa_cfg *ioa_cfg;
struct ipr_resource_entry *res;
- int rc;
+ struct ata_port *ap;
+ int rc = 0;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -3388,7 +3641,14 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
- rc = ipr_device_reset(ioa_cfg, res);
+
+ if (ipr_is_gata(res) && res->sata_port) {
+ ap = res->sata_port->ap;
+ spin_unlock_irq(scsi_cmd->device->host->host_lock);
+ ata_do_eh(ap, NULL, NULL, ipr_sata_reset, NULL);
+ spin_lock_irq(scsi_cmd->device->host->host_lock);
+ } else
+ rc = ipr_device_reset(ioa_cfg, res);
res->resetting_device = 0;
LEAVE;
@@ -4300,6 +4560,9 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
return 0;
}
+ if (ipr_is_gata(res) && res->sata_port)
+ return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
+
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioarcb = &ipr_cmd->ioarcb;
list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
@@ -4345,6 +4608,26 @@ static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
}
/**
+ * ipr_ioctl - IOCTL handler
+ * @sdev: scsi device struct
+ * @cmd: IOCTL cmd
+ * @arg: IOCTL arg
+ *
+ * Return value:
+ * 0 on success / other on failure
+ **/
+int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
+{
+ struct ipr_resource_entry *res;
+
+ res = (struct ipr_resource_entry *)sdev->hostdata;
+ if (res && ipr_is_gata(res))
+ return ata_scsi_ioctl(sdev, cmd, arg);
+
+ return -EINVAL;
+}
+
+/**
* ipr_info - Get information about the card/driver
* @scsi_host: scsi host struct
*
@@ -4366,10 +4649,45 @@ static const char * ipr_ioa_info(struct Scsi_Host *host)
return buffer;
}
+/**
+ * ipr_scsi_timed_out - Handle scsi command timeout
+ * @scsi_cmd: scsi command struct
+ *
+ * Return value:
+ * EH_NOT_HANDLED
+ **/
+enum scsi_eh_timer_return ipr_scsi_timed_out(struct scsi_cmnd *scsi_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ unsigned long flags;
+
+ ENTER;
+ spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
+ ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
+
+ list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
+ if (ipr_cmd->qc && ipr_cmd->qc->scsicmd == scsi_cmd) {
+ ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+ ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
+ LEAVE;
+ return EH_NOT_HANDLED;
+}
+
+static struct scsi_transport_template ipr_transport_template = {
+ .eh_timed_out = ipr_scsi_timed_out
+};
+
static struct scsi_host_template driver_template = {
.module = THIS_MODULE,
.name = "IPR",
.info = ipr_ioa_info,
+ .ioctl = ipr_ioctl,
.queuecommand = ipr_queuecommand,
.eh_abort_handler = ipr_eh_abort,
.eh_device_reset_handler = ipr_eh_dev_reset,
@@ -4377,6 +4695,8 @@ static struct scsi_host_template driver_template = {
.slave_alloc = ipr_slave_alloc,
.slave_configure = ipr_slave_configure,
.slave_destroy = ipr_slave_destroy,
+ .target_alloc = ipr_target_alloc,
+ .target_destroy = ipr_target_destroy,
.change_queue_depth = ipr_change_queue_depth,
.change_queue_type = ipr_change_queue_type,
.bios_param = ipr_biosparam,
@@ -4391,6 +4711,330 @@ static struct scsi_host_template driver_template = {
.proc_name = IPR_NAME
};
+/**
+ * ipr_ata_phy_reset - libata phy_reset handler
+ * @ap: ata port to reset
+ *
+ **/
+static void ipr_ata_phy_reset(struct ata_port *ap)
+{
+ unsigned long flags;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_resource_entry *res = sata_port->res;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ int rc;
+
+ ENTER;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ while(ioa_cfg->in_reset_reload) {
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ }
+
+ if (!ioa_cfg->allow_cmds)
+ goto out_unlock;
+
+ rc = ipr_device_reset(ioa_cfg, res);
+
+ if (rc) {
+ ap->ops->port_disable(ap);
+ goto out_unlock;
+ }
+
+ switch(res->cfgte.proto) {
+ case IPR_PROTO_SATA:
+ case IPR_PROTO_SAS_STP:
+ ap->device[0].class = ATA_DEV_ATA;
+ break;
+ case IPR_PROTO_SATA_ATAPI:
+ case IPR_PROTO_SAS_STP_ATAPI:
+ ap->device[0].class = ATA_DEV_ATAPI;
+ break;
+ default:
+ ap->device[0].class = ATA_DEV_UNKNOWN;
+ ap->ops->port_disable(ap);
+ break;
+ };
+
+out_unlock:
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+ LEAVE;
+}
+
+/**
+ * ipr_ata_post_internal - Cleanup after an internal command
+ * @qc: ATA queued command
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
+{
+ struct ipr_sata_port *sata_port = qc->ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ unsigned long flags;
+
+ spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+ list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
+ if (ipr_cmd->qc == qc) {
+ ipr_device_reset(ioa_cfg, sata_port->res);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+}
+
+/**
+ * ipr_tf_read - Read the current ATA taskfile for the ATA port
+ * @ap: ATA port
+ * @tf: destination ATA taskfile
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
+{
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_ioasa_gata *g = &sata_port->ioasa;
+
+ tf->feature = g->error;
+ tf->nsect = g->nsect;
+ tf->lbal = g->lbal;
+ tf->lbam = g->lbam;
+ tf->lbah = g->lbah;
+ tf->device = g->device;
+ tf->command = g->status;
+ tf->hob_nsect = g->hob_nsect;
+ tf->hob_lbal = g->hob_lbal;
+ tf->hob_lbam = g->hob_lbam;
+ tf->hob_lbah = g->hob_lbah;
+ tf->ctl = g->alt_status;
+}
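Aside: ipr_tf_read and ipr_copy_sata_tf (below) are mirror images. The adapter reports the device's shadow registers in the gata section of the IOASA, and this handler hands the cached copy back to libata as a taskfile. A standalone sketch of that field-for-field mapping, with both structures reduced to illustrative stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for struct ipr_ioasa_gata and struct ata_taskfile. */
struct gata_regs { uint8_t error, nsect, lbal, lbam, lbah, device, status, alt_status; };
struct taskfile  { uint8_t feature, nsect, lbal, lbam, lbah, device, command, ctl; };

/* Report the cached device registers back as a taskfile (error->feature, status->command). */
static void tf_from_gata(struct taskfile *tf, const struct gata_regs *g)
{
        tf->feature = g->error;
        tf->nsect   = g->nsect;
        tf->lbal    = g->lbal;
        tf->lbam    = g->lbam;
        tf->lbah    = g->lbah;
        tf->device  = g->device;
        tf->command = g->status;
        tf->ctl     = g->alt_status;
}

int main(void)
{
        struct gata_regs g = { .status = 0x50, .error = 0 };
        struct taskfile tf;

        tf_from_gata(&tf, &g);
        printf("command=0x%02x\n", (unsigned)tf.command);        /* 0x50: DRDY | DSC */
        return 0;
}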
+
+/**
+ * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
+ * @regs: destination
+ * @tf: source ATA taskfile
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
+ struct ata_taskfile *tf)
+{
+ regs->feature = tf->feature;
+ regs->nsect = tf->nsect;
+ regs->lbal = tf->lbal;
+ regs->lbam = tf->lbam;
+ regs->lbah = tf->lbah;
+ regs->device = tf->device;
+ regs->command = tf->command;
+ regs->hob_feature = tf->hob_feature;
+ regs->hob_nsect = tf->hob_nsect;
+ regs->hob_lbal = tf->hob_lbal;
+ regs->hob_lbam = tf->hob_lbam;
+ regs->hob_lbah = tf->hob_lbah;
+ regs->ctl = tf->ctl;
+}
+
+/**
+ * ipr_sata_done - done function for SATA commands
+ * @ipr_cmd: ipr command struct
+ *
+ * This function is invoked by the interrupt handler for
+ * ops generated by the SCSI mid-layer to SATA devices
+ *
+ * Return value:
+ * none
+ **/
+static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
+{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ struct ata_queued_cmd *qc = ipr_cmd->qc;
+ struct ipr_sata_port *sata_port = qc->ap->private_data;
+ struct ipr_resource_entry *res = sata_port->res;
+ u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+
+ memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
+ sizeof(struct ipr_ioasa_gata));
+ ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
+
+ if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
+ scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
+ res->cfgte.res_addr.target);
+
+ if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
+ qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
+ else
+ qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ ata_qc_complete(qc);
+}
+
+/**
+ * ipr_build_ata_ioadl - Build an ATA scatter/gather list
+ * @ipr_cmd: ipr command struct
+ * @qc: ATA queued command
+ *
+ **/
+static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
+ struct ata_queued_cmd *qc)
+{
+ u32 ioadl_flags = 0;
+ struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+ int len = qc->nbytes + qc->pad_len;
+ struct scatterlist *sg;
+
+ if (len == 0)
+ return;
+
+ if (qc->dma_dir == DMA_TO_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+ ioarcb->write_data_transfer_length = cpu_to_be32(len);
+ ioarcb->write_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ } else if (qc->dma_dir == DMA_FROM_DEVICE) {
+ ioadl_flags = IPR_IOADL_FLAGS_READ;
+ ioarcb->read_data_transfer_length = cpu_to_be32(len);
+ ioarcb->read_ioadl_len =
+ cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
+ }
+
+ ata_for_each_sg(sg, qc) {
+ ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
+ ioadl->address = cpu_to_be32(sg_dma_address(sg));
+ if (ata_sg_is_last(sg, qc))
+ ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+ else
+ ioadl++;
+ }
+}
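Aside: the loop above emits one IOA data descriptor per scatter/gather element and tags the final descriptor so the adapter knows where the list ends. A simplified user-space sketch over a plain array; the flag bits here are placeholders, not the IPR_IOADL_FLAGS_* values from ipr.h:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder flag bits for illustration only. */
#define FLAGS_WRITE 0x80000000u
#define FLAGS_READ  0x40000000u
#define FLAGS_LAST  0x01000000u

struct sg_ent     { uint32_t addr, len; };
struct ioadl_desc { uint32_t flags_and_len, address; };

/* Build one descriptor per s/g element; the last descriptor carries the LAST flag. */
static void build_ioadl(struct ioadl_desc *d, const struct sg_ent *sg, int n, int write)
{
        uint32_t flags = write ? FLAGS_WRITE : FLAGS_READ;
        int i;

        for (i = 0; i < n; i++) {
                d[i].flags_and_len = flags | sg[i].len;
                d[i].address = sg[i].addr;
                if (i == n - 1)
                        d[i].flags_and_len |= FLAGS_LAST;
        }
}

int main(void)
{
        struct sg_ent sg[2] = { { 0x1000, 512 }, { 0x2000, 512 } };
        struct ioadl_desc d[2];

        build_ioadl(d, sg, 2, 0);
        printf("last descriptor flags: 0x%08" PRIx32 "\n", d[1].flags_and_len);
        return 0;
}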
+
+/**
+ * ipr_qc_issue - Issue a SATA qc to a device
+ * @qc: queued command
+ *
+ * Return value:
+ * 0 if success
+ **/
+static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_resource_entry *res = sata_port->res;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioarcb *ioarcb;
+ struct ipr_ioarcb_ata_regs *regs;
+
+ if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
+ return -EIO;
+
+ ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ ioarcb = &ipr_cmd->ioarcb;
+ regs = &ioarcb->add_data.u.regs;
+
+ memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
+ ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
+
+ list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ ipr_cmd->qc = qc;
+ ipr_cmd->done = ipr_sata_done;
+ ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
+ ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
+ ipr_cmd->dma_use_sg = qc->pad_len ? qc->n_elem + 1 : qc->n_elem;
+
+ ipr_build_ata_ioadl(ipr_cmd, qc);
+ regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
+ ipr_copy_sata_tf(regs, &qc->tf);
+ memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
+
+ switch (qc->tf.protocol) {
+ case ATA_PROT_NODATA:
+ case ATA_PROT_PIO:
+ break;
+
+ case ATA_PROT_DMA:
+ regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
+ break;
+
+ case ATA_PROT_ATAPI:
+ case ATA_PROT_ATAPI_NODATA:
+ regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
+ break;
+
+ case ATA_PROT_ATAPI_DMA:
+ regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
+ regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
+ break;
+
+ default:
+ WARN_ON(1);
+ return -1;
+ }
+
+ mb();
+ writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
+ ioa_cfg->regs.ioarrin_reg);
+ return 0;
+}
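Aside: the protocol switch near the end of ipr_qc_issue only decides which hint bits to OR into the ATA register block before the op is posted through the IOARRIN register. As a standalone sketch, with placeholder names and bit values standing in for ATA_PROT_* and IPR_ATA_FLAG_*:

#include <stdio.h>

/* Illustrative stand-ins for the ATA_PROT_* values and IPR_ATA_FLAG_* bits. */
enum prot { PROT_NODATA, PROT_PIO, PROT_DMA, PROT_ATAPI, PROT_ATAPI_NODATA, PROT_ATAPI_DMA };
#define FLAG_XFER_TYPE_DMA 0x01
#define FLAG_PACKET_CMD    0x02

/* Return the flag bits a protocol adds; -1 for protocols the adapter cannot issue. */
static int proto_flags(enum prot p)
{
        switch (p) {
        case PROT_NODATA:
        case PROT_PIO:
                return 0;
        case PROT_DMA:
                return FLAG_XFER_TYPE_DMA;
        case PROT_ATAPI:
        case PROT_ATAPI_NODATA:
                return FLAG_PACKET_CMD;
        case PROT_ATAPI_DMA:
                return FLAG_PACKET_CMD | FLAG_XFER_TYPE_DMA;
        default:
                return -1;
        }
}

int main(void)
{
        printf("0x%02x\n", proto_flags(PROT_ATAPI_DMA));        /* prints 0x03 */
        return 0;
}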
+
+/**
+ * ipr_ata_check_status - Return last ATA status
+ * @ap: ATA port
+ *
+ * Return value:
+ * ATA status
+ **/
+static u8 ipr_ata_check_status(struct ata_port *ap)
+{
+ struct ipr_sata_port *sata_port = ap->private_data;
+ return sata_port->ioasa.status;
+}
+
+/**
+ * ipr_ata_check_altstatus - Return last ATA altstatus
+ * @ap: ATA port
+ *
+ * Return value:
+ * Alt ATA status
+ **/
+static u8 ipr_ata_check_altstatus(struct ata_port *ap)
+{
+ struct ipr_sata_port *sata_port = ap->private_data;
+ return sata_port->ioasa.alt_status;
+}
+
+static struct ata_port_operations ipr_sata_ops = {
+ .port_disable = ata_port_disable,
+ .check_status = ipr_ata_check_status,
+ .check_altstatus = ipr_ata_check_altstatus,
+ .dev_select = ata_noop_dev_select,
+ .phy_reset = ipr_ata_phy_reset,
+ .post_internal_cmd = ipr_ata_post_internal,
+ .tf_read = ipr_tf_read,
+ .qc_prep = ata_noop_qc_prep,
+ .qc_issue = ipr_qc_issue,
+ .port_start = ata_sas_port_start,
+ .port_stop = ata_sas_port_stop
+};
+
+static struct ata_port_info sata_port_info = {
+ .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
+ ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
+ .pio_mask = 0x10, /* pio4 */
+ .mwdma_mask = 0x07,
+ .udma_mask = 0x7f, /* udma0-6 */
+ .port_ops = &ipr_sata_ops
+};
+
#ifdef CONFIG_PPC_PSERIES
static const u16 ipr_blocked_processors[] = {
PV_NORTHSTAR,
@@ -6352,7 +6996,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
struct Scsi_Host *host;
unsigned long ipr_regs_pci;
void __iomem *ipr_regs;
- u32 rc = PCIBIOS_SUCCESSFUL;
+ int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc;
ENTER;
@@ -6374,6 +7018,9 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
+ host->transportt = &ipr_transport_template;
+ ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
+ sata_port_info.flags, &ipr_sata_ops);
ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
@@ -6749,7 +7396,7 @@ static int __init ipr_init(void)
ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
- return pci_module_init(&ipr_driver);
+ return pci_register_driver(&ipr_driver);
}
/**