Diffstat (limited to 'drivers/scsi/qla2xxx/qla_iocb.c')
-rw-r--r-- drivers/scsi/qla2xxx/qla_iocb.c | 405
1 file changed, 241 insertions(+), 164 deletions(-)
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 9312b19ed708..47bf60a9490a 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -292,6 +292,26 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
}
}
+/*
+ * Find the first handle that is not in use, starting from
+ * req->current_outstanding_cmd + 1. The caller must hold the lock that is
+ * associated with @req.
+ */
+uint32_t qla2xxx_get_next_handle(struct req_que *req)
+{
+ uint32_t index, handle = req->current_outstanding_cmd;
+
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
+ handle++;
+ if (handle == req->num_outstanding_cmds)
+ handle = 1;
+ if (!req->outstanding_cmds[handle])
+ return handle;
+ }
+
+ return 0;
+}
+
/**
* qla2x00_start_scsi() - Send a SCSI command to the ISP
* @sp: command to send to the ISP
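
For context, the helper added above factors out the open-coded "find a free handle" loop that each ->start_scsi() variant below previously carried. The following is a minimal, self-contained userspace sketch of the same allocation pattern, not the kernel code: struct req_que is reduced to the three fields the search touches, and the caller bookkeeping in main() (recording the handle and the command pointer) is an assumption based on the driver's usual pattern rather than something shown in these hunks. Handle 0 is reserved as the "no free slot" sentinel, which is why the callers below check `if (handle == 0) goto queuing_error;`.

#include <stdint.h>
#include <stdio.h>

#define NUM_CMDS 8	/* stand-in for req->num_outstanding_cmds */

/* Simplified model of the request queue; the real struct req_que is larger. */
struct req_que {
	uint32_t current_outstanding_cmd;
	uint32_t num_outstanding_cmds;
	void *outstanding_cmds[NUM_CMDS];
};

/*
 * Same search as the new helper: start just past the last handle handed out,
 * wrap at num_outstanding_cmds, and never return slot 0 (0 means "full").
 */
static uint32_t get_next_handle(struct req_que *req)
{
	uint32_t index, handle = req->current_outstanding_cmd;

	for (index = 1; index < req->num_outstanding_cmds; index++) {
		handle++;
		if (handle == req->num_outstanding_cmds)
			handle = 1;
		if (!req->outstanding_cmds[handle])
			return handle;
	}

	return 0;
}

int main(void)
{
	struct req_que req = { .num_outstanding_cmds = NUM_CMDS };
	char cmd[] = "srb";	/* placeholder for an srb_t */
	uint32_t handle;

	handle = get_next_handle(&req);
	if (handle == 0)
		return 1;	/* corresponds to the queuing_error path */

	/* Caller bookkeeping (assumed, by analogy with the driver). */
	req.current_outstanding_cmd = handle;
	req.outstanding_cmds[handle] = cmd;
	printf("allocated handle %u\n", handle);
	return 0;
}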
@@ -306,7 +326,6 @@ qla2x00_start_scsi(srb_t *sp)
scsi_qla_host_t *vha;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
cmd_entry_t *cmd_pkt;
uint16_t cnt;
@@ -339,16 +358,8 @@ qla2x00_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -610,7 +621,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
}
cur_seg = scsi_sglist(cmd);
- ctx = GET_CMD_CTX_SP(sp);
+ ctx = sp->u.scmd.ct6_ctx;
while (tot_dsds) {
avail_dsds = (tot_dsds > QLA_DSDS_PER_IOCB) ?
@@ -943,8 +954,7 @@ alloc_and_fill:
if (sp) {
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)
- sp->u.scmd.ctx)->dsd_list);
+ &sp->u.scmd.crc_ctx->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
} else {
@@ -1041,8 +1051,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp,
if (sp) {
list_add_tail(&dsd_ptr->list,
- &((struct crc_context *)
- sp->u.scmd.ctx)->dsd_list);
+ &sp->u.scmd.crc_ctx->dsd_list);
sp->flags |= SRB_CRC_CTX_DSD_VALID;
} else {
@@ -1088,7 +1097,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
sgl = scsi_prot_sglist(cmd);
vha = sp->vha;
- difctx = sp->u.scmd.ctx;
+ difctx = sp->u.scmd.crc_ctx;
direction_to_device = cmd->sc_data_direction == DMA_TO_DEVICE;
ql_dbg(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe021,
"%s: scsi_cmnd: %p, crc_ctx: %p, sp: %p\n",
@@ -1364,6 +1373,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
cur_dsd++;
return 0;
}
+
/**
* qla24xx_build_scsi_crc_2_iocbs() - Build IOCB command utilizing Command
* Type 6 IOCB types.
@@ -1427,7 +1437,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
bundling = 0;
/* Allocate CRC context from global pool */
- crc_ctx_pkt = sp->u.scmd.ctx =
+ crc_ctx_pkt = sp->u.scmd.crc_ctx =
dma_pool_zalloc(ha->dl_dma_pool, GFP_ATOMIC, &crc_ctx_dma);
if (!crc_ctx_pkt)
@@ -1515,7 +1525,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
}
if (!bundling) {
- cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd;
+ cur_dsd = &crc_ctx_pkt->u.nobundling.data_dsd[0];
} else {
/*
* Configure Bundling if we need to fetch interlaving
@@ -1525,7 +1535,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
crc_ctx_pkt->u.bundling.dif_byte_count = cpu_to_le32(dif_bytes);
crc_ctx_pkt->u.bundling.dseg_count = cpu_to_le16(tot_dsds -
tot_prot_dsds);
- cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd;
+ cur_dsd = &crc_ctx_pkt->u.bundling.data_dsd[0];
}
/* Finish the common fields of CRC pkt */
@@ -1583,7 +1593,6 @@ qla24xx_start_scsi(srb_t *sp)
int nseg;
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
struct cmd_type_7 *cmd_pkt;
uint16_t cnt;
@@ -1611,16 +1620,8 @@ qla24xx_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -1723,7 +1724,6 @@ qla24xx_dif_start_scsi(srb_t *sp)
int nseg;
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
uint16_t cnt;
uint16_t req_cnt = 0;
@@ -1764,17 +1764,8 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
-
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Compute number of required data segments */
@@ -1919,7 +1910,6 @@ qla2xxx_start_scsi_mq(srb_t *sp)
int nseg;
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
struct cmd_type_7 *cmd_pkt;
uint16_t cnt;
@@ -1950,16 +1940,8 @@ qla2xxx_start_scsi_mq(srb_t *sp)
vha->marker_needed = 0;
}
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -2063,7 +2045,6 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
int nseg;
unsigned long flags;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
uint16_t cnt;
uint16_t req_cnt = 0;
@@ -2118,17 +2099,8 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
vha->marker_needed = 0;
}
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
-
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Compute number of required data segments */
@@ -2275,7 +2247,7 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
struct qla_hw_data *ha = vha->hw;
struct req_que *req = qpair->req;
device_reg_t *reg = ISP_QUE_REG(ha, req->id);
- uint32_t index, handle;
+ uint32_t handle;
request_t *pkt;
uint16_t cnt, req_cnt;
@@ -2315,16 +2287,8 @@ __qla2x00_alloc_iocbs(struct qla_qpair *qpair, srb_t *sp)
goto queuing_error;
if (sp) {
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds) {
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0) {
ql_log(ql_log_warn, vha, 0x700b,
"No room on outstanding cmd array.\n");
goto queuing_error;
@@ -2441,11 +2405,19 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
static void
qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
{
+ u16 control_flags = LCF_COMMAND_LOGO;
logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
- logio->control_flags =
- cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
- if (!sp->fcport->keep_nport_handle)
- logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
+
+ if (sp->fcport->explicit_logout) {
+ control_flags |= LCF_EXPL_LOGO|LCF_FREE_NPORT;
+ } else {
+ control_flags |= LCF_IMPL_LOGO;
+
+ if (!sp->fcport->keep_nport_handle)
+ control_flags |= LCF_FREE_NPORT;
+ }
+
+ logio->control_flags = cpu_to_le16(control_flags);
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
logio->port_id[0] = sp->fcport->d_id.b.al_pa;
logio->port_id[1] = sp->fcport->d_id.b.area;
@@ -2540,13 +2512,11 @@ void qla2x00_init_timer(srb_t *sp, unsigned long tmo)
sp->free = qla2x00_sp_free;
if (IS_QLAFX00(sp->vha->hw) && sp->type == SRB_FXIOCB_DCMD)
init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
- add_timer(&sp->u.iocb_cmd.timer);
+ sp->start_timer = 1;
}
-static void
-qla2x00_els_dcmd_sp_free(void *data)
+static void qla2x00_els_dcmd_sp_free(srb_t *sp)
{
- srb_t *sp = data;
struct srb_iocb *elsio = &sp->u.iocb_cmd;
kfree(sp->fcport);
@@ -2567,19 +2537,36 @@ qla2x00_els_dcmd_iocb_timeout(void *data)
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
struct srb_iocb *lio = &sp->u.iocb_cmd;
+ unsigned long flags = 0;
+ int res, h;
ql_dbg(ql_dbg_io, vha, 0x3069,
"%s Timeout, hdl=%x, portid=%02x%02x%02x\n",
sp->name, sp->handle, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
- complete(&lio->u.els_logo.comp);
+ /* Abort the exchange */
+ res = qla24xx_async_abort_cmd(sp, false);
+ if (res) {
+ ql_dbg(ql_dbg_io, vha, 0x3070,
+ "mbx abort_command failed.\n");
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ sp->qpair->req->outstanding_cmds[h] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ complete(&lio->u.els_logo.comp);
+ } else {
+ ql_dbg(ql_dbg_io, vha, 0x3071,
+ "mbx abort_command success.\n");
+ }
}
-static void
-qla2x00_els_dcmd_sp_done(void *ptr, int res)
+static void qla2x00_els_dcmd_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
@@ -2657,6 +2644,10 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
memcpy(elsio->u.els_logo.els_logo_pyld, &logo_pyld,
sizeof(struct els_logo_payload));
+ ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3075, "LOGO buffer:");
+ ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x010a,
+ elsio->u.els_logo.els_logo_pyld,
+ sizeof(*elsio->u.els_logo.els_logo_pyld));
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -2696,28 +2687,32 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->port_id[0] = sp->fcport->d_id.b.al_pa;
els_iocb->port_id[1] = sp->fcport->d_id.b.area;
els_iocb->port_id[2] = sp->fcport->d_id.b.domain;
- els_iocb->s_id[0] = vha->d_id.b.al_pa;
- els_iocb->s_id[1] = vha->d_id.b.area;
- els_iocb->s_id[2] = vha->d_id.b.domain;
- els_iocb->control_flags = 0;
+ /* For SID the byte order is different than DID */
+ els_iocb->s_id[1] = vha->d_id.b.al_pa;
+ els_iocb->s_id[2] = vha->d_id.b.area;
+ els_iocb->s_id[0] = vha->d_id.b.domain;
if (elsio->u.els_logo.els_cmd == ELS_DCMD_PLOGI) {
+ els_iocb->control_flags = 0;
els_iocb->tx_byte_count = els_iocb->tx_len =
- sizeof(struct els_plogi_payload);
+ cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_plogi_pyld_dma,
&els_iocb->tx_address);
els_iocb->rx_dsd_count = 1;
els_iocb->rx_byte_count = els_iocb->rx_len =
- sizeof(struct els_plogi_payload);
+ cpu_to_le32(sizeof(struct els_plogi_payload));
put_unaligned_le64(elsio->u.els_plogi.els_resp_pyld_dma,
&els_iocb->rx_address);
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3073,
"PLOGI ELS IOCB:\n");
ql_dump_buffer(ql_log_info, vha, 0x0109,
- (uint8_t *)els_iocb, 0x70);
+ (uint8_t *)els_iocb,
+ sizeof(*els_iocb));
} else {
- els_iocb->tx_byte_count = sizeof(struct els_logo_payload);
+ els_iocb->control_flags = 1 << 13;
+ els_iocb->tx_byte_count =
+ cpu_to_le32(sizeof(struct els_logo_payload));
put_unaligned_le64(elsio->u.els_logo.els_logo_pyld_dma,
&els_iocb->tx_address);
els_iocb->tx_len = cpu_to_le32(sizeof(struct els_logo_payload));
@@ -2725,6 +2720,11 @@ qla24xx_els_logo_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
els_iocb->rx_byte_count = 0;
els_iocb->rx_address = 0;
els_iocb->rx_len = 0;
+ ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x3076,
+ "LOGO ELS IOCB:");
+ ql_dump_buffer(ql_log_info, vha, 0x010b,
+ els_iocb,
+ sizeof(*els_iocb));
}
sp->vha->qla_stats.control_requests++;
@@ -2736,34 +2736,57 @@ qla2x00_els_dcmd2_iocb_timeout(void *data)
srb_t *sp = data;
fc_port_t *fcport = sp->fcport;
struct scsi_qla_host *vha = sp->vha;
- struct qla_hw_data *ha = vha->hw;
unsigned long flags = 0;
- int res;
+ int res, h;
ql_dbg(ql_dbg_io + ql_dbg_disc, vha, 0x3069,
"%s hdl=%x ELS Timeout, %8phC portid=%06x\n",
sp->name, sp->handle, fcport->port_name, fcport->d_id.b24);
/* Abort the exchange */
- spin_lock_irqsave(&ha->hardware_lock, flags);
- res = ha->isp_ops->abort_command(sp);
+ res = qla24xx_async_abort_cmd(sp, false);
ql_dbg(ql_dbg_io, vha, 0x3070,
"mbx abort_command %s\n",
(res == QLA_SUCCESS) ? "successful" : "failed");
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ if (res) {
+ spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags);
+ for (h = 1; h < sp->qpair->req->num_outstanding_cmds; h++) {
+ if (sp->qpair->req->outstanding_cmds[h] == sp) {
+ sp->qpair->req->outstanding_cmds[h] = NULL;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(sp->qpair->qp_lock_ptr, flags);
+ sp->done(sp, QLA_FUNCTION_TIMEOUT);
+ }
+}
- sp->done(sp, QLA_FUNCTION_TIMEOUT);
+void qla2x00_els_dcmd2_free(scsi_qla_host_t *vha, struct els_plogi *els_plogi)
+{
+ if (els_plogi->els_plogi_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ els_plogi->tx_size,
+ els_plogi->els_plogi_pyld,
+ els_plogi->els_plogi_pyld_dma);
+
+ if (els_plogi->els_resp_pyld)
+ dma_free_coherent(&vha->hw->pdev->dev,
+ els_plogi->rx_size,
+ els_plogi->els_resp_pyld,
+ els_plogi->els_resp_pyld_dma);
}
-static void
-qla2x00_els_dcmd2_sp_done(void *ptr, int res)
+static void qla2x00_els_dcmd2_sp_done(srb_t *sp, int res)
{
- srb_t *sp = ptr;
fc_port_t *fcport = sp->fcport;
struct srb_iocb *lio = &sp->u.iocb_cmd;
struct scsi_qla_host *vha = sp->vha;
struct event_arg ea;
struct qla_work_evt *e;
+ struct fc_port *conflict_fcport;
+ port_id_t cid; /* conflict Nport id */
+ u32 *fw_status = sp->u.iocb_cmd.u.els_plogi.fw_status;
+ u16 lid;
ql_dbg(ql_dbg_disc, vha, 0x3072,
"%s ELS done rc %d hdl=%x, portid=%06x %8phC\n",
@@ -2775,31 +2798,109 @@ qla2x00_els_dcmd2_sp_done(void *ptr, int res)
if (sp->flags & SRB_WAKEUP_ON_COMP)
complete(&lio->u.els_plogi.comp);
else {
- if (res) {
- set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
- } else {
+ switch (fw_status[0]) {
+ case CS_DATA_UNDERRUN:
+ case CS_COMPLETE:
memset(&ea, 0, sizeof(ea));
ea.fcport = fcport;
ea.rc = res;
- ea.event = FCME_ELS_PLOGI_DONE;
- qla2x00_fcport_event_handler(vha, &ea);
+ qla_handle_els_plogi_done(vha, &ea);
+ break;
+
+ case CS_IOCB_ERROR:
+ switch (fw_status[1]) {
+ case LSC_SCODE_PORTID_USED:
+ lid = fw_status[2] & 0xffff;
+ qlt_find_sess_invalidate_other(vha,
+ wwn_to_u64(fcport->port_name),
+ fcport->d_id, lid, &conflict_fcport);
+ if (conflict_fcport) {
+ /*
+ * Another fcport shares the same
+ * loop_id & nport id; conflict
+ * fcport needs to finish cleanup
+ * before this fcport can proceed
+ * to login.
+ */
+ conflict_fcport->conflict = fcport;
+ fcport->login_pause = 1;
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x post gidpn\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ } else {
+ ql_dbg(ql_dbg_disc, vha, 0x20ed,
+ "%s %d %8phC pid %06x inuse with lid %#x sched del\n",
+ __func__, __LINE__,
+ fcport->port_name,
+ fcport->d_id.b24, lid);
+ qla2x00_clear_loop_id(fcport);
+ set_bit(lid, vha->hw->loop_id_map);
+ fcport->loop_id = lid;
+ fcport->keep_nport_handle = 0;
+ qlt_schedule_sess_for_deletion(fcport);
+ }
+ break;
+
+ case LSC_SCODE_NPORT_USED:
+ cid.b.domain = (fw_status[2] >> 16) & 0xff;
+ cid.b.area = (fw_status[2] >> 8) & 0xff;
+ cid.b.al_pa = fw_status[2] & 0xff;
+ cid.b.rsvd_1 = 0;
+
+ ql_dbg(ql_dbg_disc, vha, 0x20ec,
+ "%s %d %8phC lid %#x in use with pid %06x post gnl\n",
+ __func__, __LINE__, fcport->port_name,
+ fcport->loop_id, cid.b24);
+ set_bit(fcport->loop_id,
+ vha->hw->loop_id_map);
+ fcport->loop_id = FC_NO_LOOP_ID;
+ qla24xx_post_gnl_work(vha, fcport);
+ break;
+
+ case LSC_SCODE_NOXCB:
+ vha->hw->exch_starvation++;
+ if (vha->hw->exch_starvation > 5) {
+ ql_log(ql_log_warn, vha, 0xd046,
+ "Exchange starvation. Resetting RISC\n");
+ vha->hw->exch_starvation = 0;
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+ }
+ /* fall through */
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ fcport->flags &= ~FCF_ASYNC_SENT;
+ qla2x00_set_fcport_disc_state(fcport,
+ DSC_LOGIN_FAILED);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
+ }
+ break;
+
+ default:
+ ql_dbg(ql_dbg_disc, vha, 0x20eb,
+ "%s %8phC cmd error 2 fw_status 0x%x 0x%x 0x%x\n",
+ __func__, sp->fcport->port_name,
+ fw_status[0], fw_status[1], fw_status[2]);
+
+ sp->fcport->flags &= ~FCF_ASYNC_SENT;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_FAILED);
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+ break;
}
e = qla2x00_alloc_work(vha, QLA_EVT_UNMAP);
if (!e) {
struct srb_iocb *elsio = &sp->u.iocb_cmd;
- if (elsio->u.els_plogi.els_plogi_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev,
- elsio->u.els_plogi.tx_size,
- elsio->u.els_plogi.els_plogi_pyld,
- elsio->u.els_plogi.els_plogi_pyld_dma);
-
- if (elsio->u.els_plogi.els_resp_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev,
- elsio->u.els_plogi.rx_size,
- elsio->u.els_plogi.els_resp_pyld,
- elsio->u.els_plogi.els_resp_pyld_dma);
+ qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
sp->free(sp);
return;
}
@@ -2823,14 +2924,16 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!sp) {
ql_log(ql_log_info, vha, 0x70e6,
"SRB allocation failed\n");
+ fcport->flags &= ~FCF_ASYNC_ACTIVE;
return -ENOMEM;
}
+ fcport->flags |= FCF_ASYNC_SENT;
+ qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_PEND);
elsio = &sp->u.iocb_cmd;
ql_dbg(ql_dbg_io, vha, 0x3073,
"Enter: PLOGI portid=%06x\n", fcport->d_id.b24);
- fcport->flags |= FCF_ASYNC_SENT;
sp->type = SRB_ELS_DCMD;
sp->name = "ELS_DCMD";
sp->fcport = fcport;
@@ -2876,7 +2979,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x3073, "PLOGI buffer:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x0109,
- (uint8_t *)elsio->u.els_plogi.els_plogi_pyld, 0x70);
+ (uint8_t *)elsio->u.els_plogi.els_plogi_pyld,
+ sizeof(*elsio->u.els_plogi.els_plogi_pyld));
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -2898,19 +3002,8 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
}
out:
- fcport->flags &= ~(FCF_ASYNC_SENT);
- if (elsio->u.els_plogi.els_plogi_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev,
- elsio->u.els_plogi.tx_size,
- elsio->u.els_plogi.els_plogi_pyld,
- elsio->u.els_plogi.els_plogi_pyld_dma);
-
- if (elsio->u.els_plogi.els_resp_pyld)
- dma_free_coherent(&sp->vha->hw->pdev->dev,
- elsio->u.els_plogi.rx_size,
- elsio->u.els_plogi.els_resp_pyld,
- elsio->u.els_plogi.els_resp_pyld_dma);
-
+ fcport->flags &= ~(FCF_ASYNC_SENT | FCF_ASYNC_ACTIVE);
+ qla2x00_els_dcmd2_free(vha, &elsio->u.els_plogi);
sp->free(sp);
done:
return rval;
@@ -3115,7 +3208,6 @@ qla82xx_start_scsi(srb_t *sp)
unsigned long flags;
struct scsi_cmnd *cmd;
uint32_t *clr_ptr;
- uint32_t index;
uint32_t handle;
uint16_t cnt;
uint16_t req_cnt;
@@ -3155,16 +3247,8 @@ qla82xx_start_scsi(srb_t *sp)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
- if (index == req->num_outstanding_cmds)
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -3235,7 +3319,7 @@ sufficient_dsds:
goto queuing_error;
}
- ctx = sp->u.scmd.ctx =
+ ctx = sp->u.scmd.ct6_ctx =
mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
if (!ctx) {
ql_log(ql_log_fatal, vha, 0x3010,
@@ -3431,9 +3515,9 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
- if (sp->u.scmd.ctx) {
- mempool_free(sp->u.scmd.ctx, ha->ctx_mempool);
- sp->u.scmd.ctx = NULL;
+ if (sp->u.scmd.crc_ctx) {
+ mempool_free(sp->u.scmd.crc_ctx, ha->ctx_mempool);
+ sp->u.scmd.crc_ctx = NULL;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3668,6 +3752,9 @@ qla2x00_start_sp(srb_t *sp)
break;
}
+ if (sp->start_timer)
+ add_timer(&sp->u.iocb_cmd.timer);
+
wmb();
qla2x00_start_iocbs(vha, qp->req);
done:
@@ -3769,7 +3856,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
uint32_t handle;
- uint32_t index;
uint16_t req_cnt;
uint16_t cnt;
uint32_t *clr_ptr;
@@ -3794,17 +3880,8 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Acquire ring specific lock */
spin_lock_irqsave(&ha->hardware_lock, flags);
- /* Check for room in outstanding command list. */
- handle = req->current_outstanding_cmd;
- for (index = 1; index < req->num_outstanding_cmds; index++) {
- handle++;
- if (handle == req->num_outstanding_cmds)
- handle = 1;
- if (!req->outstanding_cmds[handle])
- break;
- }
-
- if (index == req->num_outstanding_cmds) {
+ handle = qla2xxx_get_next_handle(req);
+ if (handle == 0) {
rval = EXT_STATUS_BUSY;
goto queuing_error;
}