Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r-- | drivers/scsi/lpfc/lpfc_init.c | 294 |
1 file changed, 198 insertions(+), 96 deletions(-)
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 2b7ea7e53e12..f539c554588c 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1034,6 +1034,7 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	LIST_HEAD(nvmet_aborts);
 	unsigned long iflag = 0;
 	struct lpfc_sglq *sglq_entry = NULL;
+	int cnt;
 
 	lpfc_sli_hbqbuf_free_all(phba);
@@ -1090,11 +1091,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		cnt = 0;
 		list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
 			psb->pCmd = NULL;
 			psb->status = IOSTAT_SUCCESS;
+			cnt++;
 		}
 		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+		phba->put_nvme_bufs += cnt;
 		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
 		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
@@ -3339,6 +3343,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_put, list) {
 		list_del(&lpfc_ncmd->list);
+		phba->put_nvme_bufs--;
 		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
@@ -3350,6 +3355,7 @@ lpfc_nvme_free(struct lpfc_hba *phba)
 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
 				 &phba->lpfc_nvme_buf_list_get, list) {
 		list_del(&lpfc_ncmd->list);
+		phba->get_nvme_bufs--;
 		dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
 			      lpfc_ncmd->dma_handle);
 		kfree(lpfc_ncmd);
@@ -3754,9 +3760,11 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
 	uint16_t i, lxri, els_xri_cnt;
 	uint16_t nvme_xri_cnt, nvme_xri_max;
 	LIST_HEAD(nvme_sgl_list);
-	int rc;
+	int rc, cnt;
 
 	phba->total_nvme_bufs = 0;
+	phba->get_nvme_bufs = 0;
+	phba->put_nvme_bufs = 0;
 
 	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
 		return 0;
@@ -3780,6 +3788,9 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
 	spin_lock(&phba->nvme_buf_list_put_lock);
 	list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
 	list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+	cnt = phba->get_nvme_bufs + phba->put_nvme_bufs;
+	phba->get_nvme_bufs = 0;
+	phba->put_nvme_bufs = 0;
 	spin_unlock(&phba->nvme_buf_list_put_lock);
 	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -3824,6 +3835,7 @@ lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
 	spin_lock_irq(&phba->nvme_buf_list_get_lock);
 	spin_lock(&phba->nvme_buf_list_put_lock);
 	list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+	phba->get_nvme_bufs = cnt;
 	INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
 	spin_unlock(&phba->nvme_buf_list_put_lock);
 	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
@@ -5609,8 +5621,10 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
 		/* Initialize the NVME buffer list used by driver for NVME IO */
 		spin_lock_init(&phba->nvme_buf_list_get_lock);
 		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+		phba->get_nvme_bufs = 0;
 		spin_lock_init(&phba->nvme_buf_list_put_lock);
 		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+		phba->put_nvme_bufs = 0;
 	}
 
 	/* Initialize the fabric iocb list */
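The hunks above pair every list operation with a counter update: buffers spliced onto lpfc_nvme_buf_list_put bump put_nvme_bufs, frees decrement the matching counter, and the SGL-update path moves the combined count across to get_nvme_bufs. A minimal sketch of this counted get/put pool pattern, using hypothetical names (my_pool, my_buf) that are not part of lpfc:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_buf {
        struct list_head list;
    };

    struct my_pool {
        spinlock_t put_lock;
        struct list_head put_list;
        int put_cnt;    /* shadows the list length, guarded by put_lock */
    };

    /* Return a batch of buffers, keeping put_cnt in sync the same way
     * lpfc_hba_down_post_s4() keeps phba->put_nvme_bufs in sync above.
     */
    static void my_pool_put_batch(struct my_pool *p, struct list_head *batch)
    {
        struct my_buf *b;
        unsigned long flags;
        int cnt = 0;

        list_for_each_entry(b, batch, list)
            cnt++;    /* count before taking the lock, splice inside it */

        spin_lock_irqsave(&p->put_lock, flags);
        p->put_cnt += cnt;
        list_splice(batch, &p->put_list);
        spin_unlock_irqrestore(&p->put_lock, flags);
    }

Keeping the counter next to the list means the depth of the pool can be read without walking the list, at the cost of updating it under the same lock on every transfer.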
@@ -5806,6 +5820,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	struct lpfc_mqe *mqe;
 	int longs;
 	int fof_vectors = 0;
+	int extra;
 	uint64_t wwn;
 
 	phba->sli4_hba.num_online_cpu = num_online_cpus();
@@ -5860,13 +5875,21 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	 */
 
 	/*
+	 * 1 for cmd, 1 for rsp, NVME adds an extra one
+	 * for boundary conditions in its max_sgl_segment template.
+	 */
+	extra = 2;
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		extra++;
+
+	/*
 	 * It doesn't matter what family our adapter is in, we are
 	 * limited to 2 Pages, 512 SGEs, for our SGL.
 	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
 	 */
 	max_buf_size = (2 * SLI4_PAGE_SIZE);
-	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
-		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;
+	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - extra)
+		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - extra;
 
 	/*
 	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
@@ -5899,14 +5922,14 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		 */
 		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
 				sizeof(struct fcp_rsp) +
-				((phba->cfg_sg_seg_cnt + 2) *
+				((phba->cfg_sg_seg_cnt + extra) *
 				sizeof(struct sli4_sge));
 
 		/* Total SGEs for scsi_sg_list */
-		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + extra;
 
 		/*
-		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+		 * NOTE: if (phba->cfg_sg_seg_cnt + extra) <= 256 we only
 		 * need to post 1 page for the SGL.
 		 */
 	}
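To make the sizing above concrete: the DMA buffer must hold the FCP command, the FCP response, and one SGE per data segment plus the reserved ones, where extra is 2 (cmd + rsp) or 3 with NVME enabled. A standalone worked example; the struct sizes are illustrative stand-ins, since sizeof(struct fcp_cmnd) and sizeof(struct fcp_rsp) do not appear in this diff:

    #include <stdio.h>

    int main(void)
    {
        int nvme_enabled = 1;
        int extra = 2 + (nvme_enabled ? 1 : 0); /* cmd + rsp (+ NVME pad) */
        int cfg_sg_seg_cnt = 64;                /* module parameter        */
        int sge_size = 16;                      /* assumed sli4_sge size   */
        int cmnd = 32, rsp = 24;                /* hypothetical sizes      */

        /* Mirrors the cfg_sg_dma_buf_size computation in the hunk above */
        int dma_buf_size = cmnd + rsp + (cfg_sg_seg_cnt + extra) * sge_size;
        int total_seg_cnt = cfg_sg_seg_cnt + extra;

        printf("sg_dma_buf_size=%d total_seg_cnt=%d\n",
               dma_buf_size, total_seg_cnt);
        return 0;
    }

With these stand-in numbers the buffer works out to 56 + 67 * 16 = 1128 bytes; the point is only the "+ extra" accounting, which replaces the hard-coded "+ 2" everywhere.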
@@ -5947,9 +5970,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
-
-		/* Fast-path XRI aborted CQ Event work queue list */
-		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
 	}
 
 	/* This abort list used by worker thread */
@@ -7936,8 +7956,12 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
 	phba->cfg_fcp_io_channel = io_channel;
 	if (phba->cfg_nvme_io_channel > io_channel)
 		phba->cfg_nvme_io_channel = io_channel;
-	if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
-		phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+	if (phba->nvmet_support) {
+		if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
+			phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
+	}
+	if (phba->cfg_nvmet_mrq > LPFC_NVMET_MRQ_MAX)
+		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_MAX;
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
@@ -7958,10 +7982,10 @@ static int
 lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
 {
 	struct lpfc_queue *qdesc;
-	int cnt;
 
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-				      phba->sli4_hba.cq_ecount);
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+				      phba->sli4_hba.cq_esize,
+				      LPFC_CQE_EXP_COUNT);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0508 Failed allocate fast-path NVME CQ (%d)\n",
@@ -7970,8 +7994,8 @@ lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
 	}
 	phba->sli4_hba.nvme_cq[wqidx] = qdesc;
 
-	cnt = LPFC_NVME_WQSIZE;
-	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+				      LPFC_WQE128_SIZE, LPFC_WQE_EXP_COUNT);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0509 Failed allocate fast-path NVME WQ (%d)\n",
@@ -7987,11 +8011,18 @@ static int
 lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
 {
 	struct lpfc_queue *qdesc;
-	uint32_t wqesize;
 
 	/* Create Fast Path FCP CQs */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
-				      phba->sli4_hba.cq_ecount);
+	if (phba->fcp_embed_io)
+		/* Increase the CQ size when WQEs contain an embedded cdb */
+		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+					      phba->sli4_hba.cq_esize,
+					      LPFC_CQE_EXP_COUNT);
+
+	else
+		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+					      phba->sli4_hba.cq_esize,
+					      phba->sli4_hba.cq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
@@ -8000,9 +8031,15 @@ lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
 	}
 	phba->sli4_hba.fcp_cq[wqidx] = qdesc;
 
 	/* Create Fast Path FCP WQs */
-	wqesize = (phba->fcp_embed_io) ?
-		LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-	qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
+	if (phba->fcp_embed_io)
+		/* Increase the WQ size when WQEs contain an embedded cdb */
+		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_EXPANDED_PAGE_SIZE,
+					      LPFC_WQE128_SIZE,
+					      LPFC_WQE_EXP_COUNT);
+	else
+		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+					      phba->sli4_hba.wq_esize,
+					      phba->sli4_hba.wq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0503 Failed allocate fast-path FCP WQ (%d)\n",
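Both allocators above now pick their queue geometry in one place: a larger page size and deeper entry count when fcp_embed_io forces 128-byte WQEs, the default geometry otherwise. A condensed sketch of that decision, with my_* names and stub constants standing in for the lpfc types and LPFC_* macros:

    #include <stddef.h>

    struct my_queue;    /* opaque stand-in for struct lpfc_queue */

    struct my_hba {
        int fcp_embed_io;        /* WQEs carry an embedded CDB      */
        int wq_esize, wq_ecount; /* default entry size and depth    */
    };

    #define DEF_PAGE   4096  /* illustrative LPFC_DEFAULT_PAGE_SIZE  */
    #define EXP_PAGE  16384  /* illustrative LPFC_EXPANDED_PAGE_SIZE */
    #define WQE128      128
    #define EXP_COUNT  4096  /* illustrative LPFC_WQE_EXP_COUNT      */

    /* Stub for lpfc_sli4_queue_alloc(); the real function allocates
     * DMA-able pages for the queue entries.
     */
    static struct my_queue *my_queue_alloc(struct my_hba *hba, int page_size,
                                           int esize, int ecount)
    {
        (void)hba; (void)page_size; (void)esize; (void)ecount;
        return NULL;    /* allocation elided in this sketch */
    }

    static struct my_queue *my_alloc_wq(struct my_hba *hba)
    {
        if (hba->fcp_embed_io)
            /* bigger pages so the deeper 128-byte-entry WQ still fits */
            return my_queue_alloc(hba, EXP_PAGE, WQE128, EXP_COUNT);
        return my_queue_alloc(hba, DEF_PAGE, hba->wq_esize, hba->wq_ecount);
    }

Making the page size an explicit argument (rather than implicit in the entry count) is what lets the NVME and embedded-CDB paths request the expanded geometry while every other queue keeps the default.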
@@ -8173,7 +8210,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	/* Create HBA Event Queues (EQs) */
 	for (idx = 0; idx < io_channel; idx++) {
 		/* Create EQs */
-		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+					      phba->sli4_hba.eq_esize,
 					      phba->sli4_hba.eq_ecount);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8196,8 +8234,9 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	if (phba->nvmet_support) {
 		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
 			qdesc = lpfc_sli4_queue_alloc(phba,
-						      phba->sli4_hba.cq_esize,
-						      phba->sli4_hba.cq_ecount);
+						      LPFC_DEFAULT_PAGE_SIZE,
+						      phba->sli4_hba.cq_esize,
+						      phba->sli4_hba.cq_ecount);
 			if (!qdesc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 						"3142 Failed allocate NVME "
@@ -8213,7 +8252,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	 */
 
 	/* Create slow-path Mailbox Command Complete Queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+				      phba->sli4_hba.cq_esize,
 				      phba->sli4_hba.cq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8223,7 +8263,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	phba->sli4_hba.mbx_cq = qdesc;
 
 	/* Create slow-path ELS Complete Queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+				      phba->sli4_hba.cq_esize,
 				      phba->sli4_hba.cq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8239,7 +8280,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
 	/* Create Mailbox Command Queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+				      phba->sli4_hba.mq_esize,
 				      phba->sli4_hba.mq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8253,7 +8295,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	 */
 
 	/* Create slow-path ELS Work Queue */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+				      phba->sli4_hba.wq_esize,
 				      phba->sli4_hba.wq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8265,7 +8308,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
 		/* Create NVME LS Complete Queue */
-		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+					      phba->sli4_hba.cq_esize,
 					      phba->sli4_hba.cq_ecount);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8275,7 +8319,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 		phba->sli4_hba.nvmels_cq = qdesc;
 
 		/* Create NVME LS Work Queue */
-		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+		qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+					      phba->sli4_hba.wq_esize,
 					      phba->sli4_hba.wq_ecount);
 		if (!qdesc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8291,7 +8336,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	 */
 
 	/* Create Receive Queue for header */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+				      phba->sli4_hba.rq_esize,
 				      phba->sli4_hba.rq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8301,7 +8347,8 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 	phba->sli4_hba.hdr_rq = qdesc;
 
 	/* Create Receive Queue for data */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+				      phba->sli4_hba.rq_esize,
 				      phba->sli4_hba.rq_ecount);
 	if (!qdesc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -8314,6 +8361,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
 			/* Create NVMET Receive Queue for header */
 			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_DEFAULT_PAGE_SIZE,
 						      phba->sli4_hba.rq_esize,
 						      LPFC_NVMET_RQE_DEF_COUNT);
 			if (!qdesc) {
@@ -8339,6 +8387,7 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
 
 			/* Create NVMET Receive Queue for data */
 			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_DEFAULT_PAGE_SIZE,
 						      phba->sli4_hba.rq_esize,
 						      LPFC_NVMET_RQE_DEF_COUNT);
 			if (!qdesc) {
@@ -8437,13 +8486,15 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
 	/* Release NVME CQ mapping array */
 	lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
 
-	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
-				 phba->cfg_nvmet_mrq);
+	if (phba->nvmet_support) {
+		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
+					 phba->cfg_nvmet_mrq);
 
-	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
-				 phba->cfg_nvmet_mrq);
-	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
-				 phba->cfg_nvmet_mrq);
+		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
+					 phba->cfg_nvmet_mrq);
+		lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
+					 phba->cfg_nvmet_mrq);
+	}
 
 	/* Release mailbox command work queue */
 	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
@@ -8514,6 +8565,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
 			qidx, (uint32_t)rc);
 		return rc;
 	}
+	cq->chann = qidx;
 
 	if (qtype != LPFC_MBOX) {
 		/* Setup nvme_cq_map for fast lookup */
@@ -8533,6 +8585,7 @@ lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
 		/* no need to tear down cq - caller will do so */
 		return rc;
 	}
+	wq->chann = qidx;
 
 	/* Bind this CQ/WQ to the NVME ring */
 	pring = wq->pring;
@@ -8773,6 +8826,8 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 				"rc = 0x%x\n", (uint32_t)rc);
 			goto out_destroy;
 		}
+		phba->sli4_hba.nvmet_cqset[0]->chann = 0;
+
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"6090 NVMET CQ setup: cq-id=%d, "
 				"parent eq-id=%d\n",
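The cq->chann and wq->chann assignments above record, at create time, which IO channel a queue belongs to, so later code can map a queue back to its channel without scanning. A minimal sketch of the idea with hypothetical types (my_queue, my_channel):

    struct my_queue {
        int chann;    /* owning IO channel, set once at create time */
    };

    struct my_channel {
        struct my_queue *cq, *wq;
    };

    /* Tag both queues of a channel pair, as lpfc_create_wq_cq() does. */
    static void my_bind_channel(struct my_channel *ch, int qidx)
    {
        ch->cq->chann = qidx;
        ch->wq->chann = qidx;
    }

    /* A completion handler can then recover the owner in O(1). */
    static struct my_channel *my_owner(struct my_channel *chans,
                                       struct my_queue *cq)
    {
        return &chans[cq->chann];    /* no search over all CQs */
    }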
@@ -8994,19 +9049,22 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
 		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
 			lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
 
-	/* Unset NVMET MRQ queue */
-	if (phba->sli4_hba.nvmet_mrq_hdr) {
-		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
-			lpfc_rq_destroy(phba,
+	if (phba->nvmet_support) {
+		/* Unset NVMET MRQ queue */
+		if (phba->sli4_hba.nvmet_mrq_hdr) {
+			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+				lpfc_rq_destroy(
+					phba,
 					phba->sli4_hba.nvmet_mrq_hdr[qidx],
 					phba->sli4_hba.nvmet_mrq_data[qidx]);
-	}
+		}
 
-	/* Unset NVMET CQ Set complete queue */
-	if (phba->sli4_hba.nvmet_cqset) {
-		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
-			lpfc_cq_destroy(phba,
-					phba->sli4_hba.nvmet_cqset[qidx]);
+		/* Unset NVMET CQ Set complete queue */
+		if (phba->sli4_hba.nvmet_cqset) {
+			for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
+				lpfc_cq_destroy(
+					phba, phba->sli4_hba.nvmet_cqset[qidx]);
+		}
 	}
 
 	/* Unset FCP response complete queue */
@@ -9175,11 +9233,6 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
 	/* Pending ELS XRI abort events */
 	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
 			 &cqelist);
-	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
-		/* Pending NVME XRI abort events */
-		list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
-				 &cqelist);
-	}
 	/* Pending asynnc events */
 	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
 			 &cqelist);
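The release-all path above follows a common kernel shape: splice each pending-event list onto a private list under the lock, then free the entries with the lock dropped. A sketch of that pattern with illustrative names (my_evt, my_release_all), assuming the entries were kmalloc'd:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct my_evt {
        struct list_head list;
    };

    static void my_release_all(spinlock_t *lock, struct list_head *pending)
    {
        struct my_evt *evt, *next;
        unsigned long flags;
        LIST_HEAD(local);

        spin_lock_irqsave(lock, flags);
        list_splice_init(pending, &local);    /* detach under the lock */
        spin_unlock_irqrestore(lock, flags);

        list_for_each_entry_safe(evt, next, &local, list) {
            list_del(&evt->list);
            kfree(evt);                       /* free outside the lock */
        }
    }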
@@ -9421,44 +9474,62 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
 			lpfc_sli4_bar0_register_memmap(phba, if_type);
 	}
 
-	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
-		/*
-		 * Map SLI4 if type 0 HBA Control Register base to a kernel
-		 * virtual address and setup the registers.
-		 */
-		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
-		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
-		phba->sli4_hba.ctrl_regs_memmap_p =
-			ioremap(phba->pci_bar1_map, bar1map_len);
-		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "ioremap failed for SLI4 HBA control registers.\n");
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if (pci_resource_start(pdev, PCI_64BIT_BAR2)) {
+			/*
+			 * Map SLI4 if type 0 HBA Control Register base to a
+			 * kernel virtual address and setup the registers.
+			 */
+			phba->pci_bar1_map = pci_resource_start(pdev,
+								PCI_64BIT_BAR2);
+			bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+			phba->sli4_hba.ctrl_regs_memmap_p =
+					ioremap(phba->pci_bar1_map,
+						bar1map_len);
+			if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+				dev_err(&pdev->dev,
+					"ioremap failed for SLI4 HBA "
+					"control registers.\n");
+				error = -ENOMEM;
+				goto out_iounmap_conf;
+			}
+			phba->pci_bar2_memmap_p =
+					phba->sli4_hba.ctrl_regs_memmap_p;
+			lpfc_sli4_bar1_register_memmap(phba);
+		} else {
+			error = -ENOMEM;
 			goto out_iounmap_conf;
 		}
-		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
-		lpfc_sli4_bar1_register_memmap(phba);
 	}
 
-	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
-	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
-		/*
-		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
-		 * virtual address and setup the registers.
-		 */
-		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
-		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
-		phba->sli4_hba.drbl_regs_memmap_p =
-			ioremap(phba->pci_bar2_map, bar2map_len);
-		if (!phba->sli4_hba.drbl_regs_memmap_p) {
-			dev_printk(KERN_ERR, &pdev->dev,
-				   "ioremap failed for SLI4 HBA doorbell registers.\n");
-			goto out_iounmap_ctrl;
-		}
-		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
-		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
-		if (error)
+	if (if_type == LPFC_SLI_INTF_IF_TYPE_0) {
+		if (pci_resource_start(pdev, PCI_64BIT_BAR4)) {
+			/*
+			 * Map SLI4 if type 0 HBA Doorbell Register base to
+			 * a kernel virtual address and setup the registers.
+			 */
+			phba->pci_bar2_map = pci_resource_start(pdev,
+								PCI_64BIT_BAR4);
+			bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+			phba->sli4_hba.drbl_regs_memmap_p =
+					ioremap(phba->pci_bar2_map,
+						bar2map_len);
+			if (!phba->sli4_hba.drbl_regs_memmap_p) {
+				dev_err(&pdev->dev,
+					"ioremap failed for SLI4 HBA"
+					" doorbell registers.\n");
+				error = -ENOMEM;
+				goto out_iounmap_ctrl;
+			}
+			phba->pci_bar4_memmap_p =
+					phba->sli4_hba.drbl_regs_memmap_p;
+			error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+			if (error)
+				goto out_iounmap_all;
+		} else {
+			error = -ENOMEM;
 			goto out_iounmap_all;
+		}
 	}
 
 	return 0;
@@ -10093,6 +10164,16 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
 	int fcp_xri_cmpl = 1;
 	int els_xri_cmpl =
 		list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
 
+	/* Driver just aborted IOs during the hba_unset process.  Pause
+	 * here to give the HBA time to complete the IO and get entries
+	 * into the abts lists.
+	 */
+	msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1 * 5);
+
+	/* Wait for NVME pending IO to flush back to transport. */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		lpfc_nvme_wait_for_io_drain(phba);
+
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
 		fcp_xri_cmpl =
 			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
@@ -10369,7 +10450,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	    !phba->nvme_support) {
 		phba->nvme_support = 0;
 		phba->nvmet_support = 0;
-		phba->cfg_nvmet_mrq = 0;
+		phba->cfg_nvmet_mrq = LPFC_NVMET_MRQ_OFF;
 		phba->cfg_nvme_io_channel = 0;
 		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
@@ -11616,6 +11697,10 @@ lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
 	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
 	lpfc_sli_flush_fcp_rings(phba);
 
+	/* Flush the outstanding NVME IOs if fc4 type enabled. */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		lpfc_sli_flush_nvme_rings(phba);
+
 	/* stop all timers */
 	lpfc_stop_hba_timers(phba);
 
@@ -11647,6 +11732,10 @@ lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
 
 	/* Clean up all driver's outstanding SCSI I/Os */
 	lpfc_sli_flush_fcp_rings(phba);
+
+	/* Flush the outstanding NVME IOs if fc4 type enabled. */
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+		lpfc_sli_flush_nvme_rings(phba);
 }
 
 /**
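The BAR-mapping rework above changes a silently skipped mapping into a hard failure: a missing or unmappable BAR now sets -ENOMEM and unwinds any earlier mapping through the existing goto ladder. A compressed sketch of the same shape, using raw BAR indices and a hypothetical my_map_bars() in place of the lpfc fields:

    #include <linux/io.h>
    #include <linux/pci.h>

    static int my_map_bars(struct pci_dev *pdev, void __iomem **ctrl,
                           void __iomem **drbl)
    {
        if (!pci_resource_start(pdev, 2))
            return -ENOMEM;               /* control BAR must exist */
        *ctrl = ioremap(pci_resource_start(pdev, 2),
                        pci_resource_len(pdev, 2));
        if (!*ctrl)
            return -ENOMEM;

        if (!pci_resource_start(pdev, 4)) {
            iounmap(*ctrl);               /* unwind the control mapping */
            return -ENOMEM;
        }
        *drbl = ioremap(pci_resource_start(pdev, 4),
                        pci_resource_len(pdev, 4));
        if (!*drbl) {
            iounmap(*ctrl);
            return -ENOMEM;
        }
        return 0;
    }

The driver's version reaches the same end state via goto out_iounmap_conf / out_iounmap_ctrl / out_iounmap_all labels, which keeps the unwind order in one place as more mappings are added.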
@@ -12138,10 +12227,10 @@ int
 lpfc_fof_queue_create(struct lpfc_hba *phba)
 {
 	struct lpfc_queue *qdesc;
-	uint32_t wqesize;
 
 	/* Create FOF EQ */
-	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
+				      phba->sli4_hba.eq_esize,
 				      phba->sli4_hba.eq_ecount);
 	if (!qdesc)
 		goto out_error;
@@ -12151,7 +12240,15 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
 
 	if (phba->cfg_fof) {
 		/* Create OAS CQ */
-		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+		if (phba->fcp_embed_io)
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_EXPANDED_PAGE_SIZE,
+						      phba->sli4_hba.cq_esize,
+						      LPFC_CQE_EXP_COUNT);
+		else
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_DEFAULT_PAGE_SIZE,
+						      phba->sli4_hba.cq_esize,
 					              phba->sli4_hba.cq_ecount);
 		if (!qdesc)
 			goto out_error;
@@ -12159,11 +12256,16 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
 		phba->sli4_hba.oas_cq = qdesc;
 
 		/* Create OAS WQ */
-		wqesize = (phba->fcp_embed_io) ?
-			LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
-		qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
-					      phba->sli4_hba.wq_ecount);
-
+		if (phba->fcp_embed_io)
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_EXPANDED_PAGE_SIZE,
+						      LPFC_WQE128_SIZE,
+						      LPFC_WQE_EXP_COUNT);
+		else
+			qdesc = lpfc_sli4_queue_alloc(phba,
+						      LPFC_DEFAULT_PAGE_SIZE,
+						      phba->sli4_hba.wq_esize,
+						      phba->sli4_hba.wq_ecount);
 		if (!qdesc)
 			goto out_error;
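Earlier in this diff, lpfc_sli4_xri_exchange_busy_wait() gains an up-front settle delay before its polling loop, so just-aborted IOs can reach the abort lists before the driver starts checking them. A sketch of that wait shape; the 10/20 ms intervals are illustrative, not the driver's LPFC_XRI_EXCH_BUSY_WAIT_T1 tunable:

    #include <linux/delay.h>
    #include <linux/list.h>

    static void my_wait_for_drain(struct list_head *abts_list)
    {
        msleep(10);                      /* settle time for in-flight aborts */
        while (!list_empty(abts_list))
            msleep(20);                  /* re-check until the list drains */
    }

In the driver the loop also logs periodically and checks several lists (SCSI, NVME, ELS) at once; the sketch shows only the sleep-then-poll skeleton.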