Diffstat (limited to 'drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c')
-rw-r--r-- | drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c | 114
1 file changed, 89 insertions, 25 deletions
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 0f807798c624..d390325c99ec 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -1170,6 +1170,7 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
 	cmd = list_first_entry_or_null(&vscsi->free_cmd,
 				       struct ibmvscsis_cmd, list);
 	if (cmd) {
+		cmd->flags &= ~(DELAY_SEND);
 		list_del(&cmd->list);
 		cmd->iue = iue;
 		cmd->type = UNSET_TYPE;
@@ -1749,45 +1750,79 @@ static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
 static void ibmvscsis_send_messages(struct scsi_info *vscsi)
 {
 	u64 msg_hi = 0;
-	/* note do not attmempt to access the IU_data_ptr with this pointer
+	/* note do not attempt to access the IU_data_ptr with this pointer
 	 * it is not valid
 	 */
 	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
 	struct ibmvscsis_cmd *cmd, *nxt;
 	struct iu_entry *iue;
 	long rc = ADAPT_SUCCESS;
+	bool retry = false;
 
 	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
-		list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
-			iue = cmd->iue;
+		do {
+			retry = false;
+			list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
+						 list) {
+				/*
+				 * Check to make sure abort cmd gets processed
+				 * prior to the abort tmr cmd
+				 */
+				if (cmd->flags & DELAY_SEND)
+					continue;
 
-			crq->valid = VALID_CMD_RESP_EL;
-			crq->format = cmd->rsp.format;
+				if (cmd->abort_cmd) {
+					retry = true;
+					cmd->abort_cmd->flags &= ~(DELAY_SEND);
+				}
 
-			if (cmd->flags & CMD_FAST_FAIL)
-				crq->status = VIOSRP_ADAPTER_FAIL;
+				/*
+				 * If CMD_T_ABORTED w/o CMD_T_TAS scenarios and
+				 * the case where LIO issued a
+				 * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST
+				 * case then we dont send a response, since it
+				 * was already done.
+				 */
+				if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
+				    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
+					list_del(&cmd->list);
+					ibmvscsis_free_cmd_resources(vscsi,
+								     cmd);
+				} else {
+					iue = cmd->iue;
 
-			crq->IU_length = cpu_to_be16(cmd->rsp.len);
+					crq->valid = VALID_CMD_RESP_EL;
+					crq->format = cmd->rsp.format;
 
-			rc = h_send_crq(vscsi->dma_dev->unit_address,
-					be64_to_cpu(msg_hi),
-					be64_to_cpu(cmd->rsp.tag));
+					if (cmd->flags & CMD_FAST_FAIL)
+						crq->status = VIOSRP_ADAPTER_FAIL;
 
-			pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
-				 cmd, be64_to_cpu(cmd->rsp.tag), rc);
+					crq->IU_length = cpu_to_be16(cmd->rsp.len);
 
-			/* if all ok free up the command element resources */
-			if (rc == H_SUCCESS) {
-				/* some movement has occurred */
-				vscsi->rsp_q_timer.timer_pops = 0;
-				list_del(&cmd->list);
+					rc = h_send_crq(vscsi->dma_dev->unit_address,
+							be64_to_cpu(msg_hi),
+							be64_to_cpu(cmd->rsp.tag));
 
-				ibmvscsis_free_cmd_resources(vscsi, cmd);
-			} else {
-				srp_snd_msg_failed(vscsi, rc);
-				break;
+					pr_debug("send_messages: cmd %p, tag 0x%llx, rc %ld\n",
+						 cmd, be64_to_cpu(cmd->rsp.tag), rc);
+
+					/* if all ok free up the command
+					 * element resources
+					 */
+					if (rc == H_SUCCESS) {
+						/* some movement has occurred */
+						vscsi->rsp_q_timer.timer_pops = 0;
+						list_del(&cmd->list);
+
+						ibmvscsis_free_cmd_resources(vscsi,
+									     cmd);
+					} else {
+						srp_snd_msg_failed(vscsi, rc);
+						break;
+					}
+				}
 			}
-		}
+		} while (retry);
 
 		if (!rc) {
 			/*
@@ -2708,6 +2743,7 @@ static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
 
 	for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
 	     i++, cmd++) {
+		cmd->abort_cmd = NULL;
 		cmd->adapter = vscsi;
 		INIT_WORK(&cmd->work, ibmvscsis_scheduler);
 		list_add_tail(&cmd->list, &vscsi->free_cmd);
@@ -3579,9 +3615,20 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
 {
 	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
 						 se_cmd);
+	struct scsi_info *vscsi = cmd->adapter;
 	struct iu_entry *iue = cmd->iue;
 	int rc;
 
+	/*
+	 * If CLIENT_FAILED OR RESPONSE_Q_DOWN, then just return success
+	 * since LIO can't do anything about it, and we dont want to
+	 * attempt an srp_transfer_data.
+	 */
+	if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
+		pr_err("write_pending failed since: %d\n", vscsi->flags);
+		return 0;
+	}
+
 	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
 			       1, 1);
 	if (rc) {
@@ -3660,11 +3707,28 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
 	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
 						 se_cmd);
 	struct scsi_info *vscsi = cmd->adapter;
+	struct ibmvscsis_cmd *cmd_itr;
+	struct iu_entry *iue = iue = cmd->iue;
+	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
+	u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
 	uint len;
 
 	pr_debug("queue_tm_rsp %p, status %d\n",
 		 se_cmd, (int)se_cmd->se_tmr_req->response);
 
+	if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
+	    cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
+		spin_lock_bh(&vscsi->intr_lock);
+		list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
+			if (tag_to_abort == cmd_itr->se_cmd.tag) {
+				cmd_itr->abort_cmd = cmd;
+				cmd->flags |= DELAY_SEND;
+				break;
+			}
+		}
+		spin_unlock_bh(&vscsi->intr_lock);
+	}
+
 	srp_build_response(vscsi, cmd, &len);
 	cmd->rsp.format = SRP_FORMAT;
 	cmd->rsp.len = len;
@@ -3672,8 +3736,8 @@ static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
 
 static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
 {
-	/* TBD: What (if anything) should we do here? */
-	pr_debug("ibmvscsis_aborted_task %p\n", se_cmd);
+	pr_debug("ibmvscsis_aborted_task %p task_tag: %llu\n",
+		 se_cmd, se_cmd->tag);
 }
 
 static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
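For context, the ordering trick this patch relies on can be shown outside the driver. The sketch below is a minimal, self-contained user-space illustration, not driver code: the names fake_cmd, send_waiting_responses and the sent field are hypothetical, and DELAY_SEND here is just a local flag standing in for the driver's. It models how an ABORT_TASK TMR response queued with DELAY_SEND and a back-pointer from the command it aborts is held back until that command's own response has gone out, after which the send loop makes one more pass.

	/* Minimal sketch of the deferred-response pattern (assumed names). */
	#include <stdbool.h>
	#include <stdio.h>

	#define DELAY_SEND 0x01	/* hold this response back for now */

	struct fake_cmd {
		const char *name;
		unsigned int flags;
		bool sent;
		struct fake_cmd *abort_cmd;	/* TMR response waiting on this cmd */
		struct fake_cmd *next;		/* stand-in for the waiting_rsp list */
	};

	/*
	 * Same shape as the patched send loop: skip entries still flagged
	 * DELAY_SEND, and when a command that is being aborted goes out,
	 * clear DELAY_SEND on its TMR response and rescan once more.
	 */
	static void send_waiting_responses(struct fake_cmd *head)
	{
		bool retry;

		do {
			retry = false;
			for (struct fake_cmd *cmd = head; cmd; cmd = cmd->next) {
				if (cmd->sent || (cmd->flags & DELAY_SEND))
					continue;

				if (cmd->abort_cmd) {
					cmd->abort_cmd->flags &= ~DELAY_SEND;
					cmd->abort_cmd = NULL;
					retry = true;	/* rescan for the freed TMR */
				}

				printf("response sent: %s\n", cmd->name);
				cmd->sent = true;
			}
		} while (retry);
	}

	int main(void)
	{
		/* TMR response queued first but flagged to wait for the aborted cmd */
		struct fake_cmd aborted = { .name = "aborted I/O cmd" };
		struct fake_cmd tmr = { .name = "ABORT_TASK TMR rsp",
					.flags = DELAY_SEND };

		aborted.abort_cmd = &tmr;
		tmr.next = &aborted;		/* TMR sits ahead of the I/O cmd */

		send_waiting_responses(&tmr);	/* prints the I/O cmd first */
		return 0;
	}

Run as-is, this prints the aborted command's response before the TMR response even though the TMR entry sits earlier in the list, which is the ordering guarantee the DELAY_SEND flag and abort_cmd back-pointer provide in ibmvscsis_send_messages() and ibmvscsis_queue_tm_rsp().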