Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/rx.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/rx.c | 229
1 file changed, 140 insertions(+), 89 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index a2d709642b2a..427fcea5cb2d 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -200,12 +200,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
  */
 int iwl_pcie_rx_stop(struct iwl_trans *trans)
 {
-	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
-		/* TODO: remove this for 22560 once fw does it */
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
+		/* TODO: remove this once fw does it */
 		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
 		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
 					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
-	} else if (trans->cfg->mq_rx_supported) {
+	} else if (trans->trans_cfg->mq_rx_supported) {
 		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
 		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
 					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
@@ -232,7 +232,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 	 * 1. shadow registers aren't enabled
 	 * 2. there is a chance that the NIC is asleep
 	 */
-	if (!trans->cfg->base_params->shadow_reg_enable &&
+	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
 	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
 		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
@@ -240,18 +240,14 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
 			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
 				       reg);
 			iwl_set_bit(trans, CSR_GP_CNTRL,
-				    BIT(trans->cfg->csr->flag_mac_access_req));
+				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			rxq->need_update = true;
 			return;
 		}
 	}
 
 	rxq->write_actual = round_down(rxq->write, 8);
-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
-		iwl_write32(trans, HBUS_TARG_WRPTR,
-			    (rxq->write_actual |
-			     ((FIRST_RX_QUEUE + rxq->id) << 16)));
-	else if (trans->cfg->mq_rx_supported)
+	if (trans->trans_cfg->mq_rx_supported)
 		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
 			    rxq->write_actual);
 	else
@@ -279,7 +275,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
 				struct iwl_rxq *rxq,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
 		struct iwl_rx_transfer_desc *bd = rxq->bd;
 
 		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));
@@ -302,6 +298,7 @@ static void iwl_pcie_restock_bd(struct iwl_trans *trans,
 static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 				  struct iwl_rxq *rxq)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_mem_buffer *rxb;
 
 	/*
@@ -322,11 +319,11 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 				       list);
 		list_del(&rxb->list);
 		rxb->invalid = false;
-		/* 12 first bits are expected to be empty */
-		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
+		/* some low bits are expected to be unset (depending on hw) */
+		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
 		/* Point to Rx buffer via next RBD in circular buffer */
 		iwl_pcie_restock_bd(trans, rxq, rxb);
-		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
+		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
 		rxq->free_count--;
 	}
 	spin_unlock(&rxq->lock);
@@ -405,7 +402,7 @@ static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
 static
 void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
-	if (trans->cfg->mq_rx_supported)
+	if (trans->trans_cfg->mq_rx_supported)
 		iwl_pcie_rxmq_restock(trans, rxq);
 	else
 		iwl_pcie_rxsq_restock(trans, rxq);
@@ -416,15 +413,34 @@ void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
  *
  */
 static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
-					   gfp_t priority)
+					   u32 *offset, gfp_t priority)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
+	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
 	struct page *page;
 	gfp_t gfp_mask = priority;
 
 	if (trans_pcie->rx_page_order > 0)
 		gfp_mask |= __GFP_COMP;
 
+	if (trans_pcie->alloc_page) {
+		spin_lock_bh(&trans_pcie->alloc_page_lock);
+		/* recheck */
+		if (trans_pcie->alloc_page) {
+			*offset = trans_pcie->alloc_page_used;
+			page = trans_pcie->alloc_page;
+			trans_pcie->alloc_page_used += rbsize;
+			if (trans_pcie->alloc_page_used >= allocsize)
+				trans_pcie->alloc_page = NULL;
+			else
+				get_page(page);
+			spin_unlock_bh(&trans_pcie->alloc_page_lock);
+			return page;
+		}
+		spin_unlock_bh(&trans_pcie->alloc_page_lock);
+	}
+
 	/* Alloc a new receive buffer */
 	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
 	if (!page) {
@@ -440,6 +456,18 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
 			 "Failed to alloc_pages\n");
 		return NULL;
 	}
+
+	if (2 * rbsize <= allocsize) {
+		spin_lock_bh(&trans_pcie->alloc_page_lock);
+		if (!trans_pcie->alloc_page) {
+			get_page(page);
+			trans_pcie->alloc_page = page;
+			trans_pcie->alloc_page_used = rbsize;
+		}
+		spin_unlock_bh(&trans_pcie->alloc_page_lock);
+	}
+
+	*offset = 0;
 	return page;
 }
@@ -460,6 +488,8 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 	struct page *page;
 
 	while (1) {
+		unsigned int offset;
+
 		spin_lock(&rxq->lock);
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock(&rxq->lock);
@@ -467,8 +497,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 		}
 		spin_unlock(&rxq->lock);
 
-		/* Alloc a new receive buffer */
-		page = iwl_pcie_rx_alloc_page(trans, priority);
+		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
 		if (!page)
 			return;
@@ -486,10 +515,11 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 
 		BUG_ON(rxb->page);
 		rxb->page = page;
+		rxb->offset = offset;
 		/* Get physical address of the RB */
 		rxb->page_dma =
-			dma_map_page(trans->dev, page, 0,
-				     PAGE_SIZE << trans_pcie->rx_page_order,
+			dma_map_page(trans->dev, page, rxb->offset,
+				     trans_pcie->rx_buf_bytes,
 				     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			rxb->page = NULL;
@@ -514,12 +544,11 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i;
 
-	for (i = 0; i < RX_POOL_SIZE; i++) {
+	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
 		if (!trans_pcie->rx_pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
-			       PAGE_SIZE << trans_pcie->rx_page_order,
-			       DMA_FROM_DEVICE);
+			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
 		__free_pages(trans_pcie->rx_pool[i].page,
 			     trans_pcie->rx_page_order);
 		trans_pcie->rx_pool[i].page = NULL;
@@ -572,15 +601,17 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 			BUG_ON(rxb->page);
 
 			/* Alloc a new receive buffer */
-			page = iwl_pcie_rx_alloc_page(trans, gfp_mask);
+			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
+						      gfp_mask);
 			if (!page)
 				continue;
 			rxb->page = page;
 
 			/* Get physical address of the RB */
-			rxb->page_dma = dma_map_page(trans->dev, page, 0,
-						     PAGE_SIZE << trans_pcie->rx_page_order,
-						     DMA_FROM_DEVICE);
+			rxb->page_dma = dma_map_page(trans->dev, page,
						     rxb->offset,
+						     trans_pcie->rx_buf_bytes,
+						     DMA_FROM_DEVICE);
 			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 				rxb->page = NULL;
 				__free_pages(page, trans_pcie->rx_page_order);
@@ -682,7 +713,7 @@ static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
 	if (use_rx_td)
 		return sizeof(*rx_td);
 	else
-		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+		return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
 			sizeof(__le32);
 }
@@ -690,8 +721,8 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
 				  struct iwl_rxq *rxq)
 {
 	struct device *dev = trans->dev;
-	bool use_rx_td = (trans->cfg->device_family >=
-			  IWL_DEVICE_FAMILY_22560);
+	bool use_rx_td = (trans->trans_cfg->device_family >=
+			  IWL_DEVICE_FAMILY_AX210);
 	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
 
 	if (rxq->bd)
@@ -712,7 +743,7 @@ static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
 	rxq->used_bd_dma = 0;
 	rxq->used_bd = NULL;
 
-	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
 		return;
 
 	if (rxq->tr_tail)
@@ -735,14 +766,14 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	struct device *dev = trans->dev;
 	int i;
 	int free_size;
-	bool use_rx_td = (trans->cfg->device_family >=
-			  IWL_DEVICE_FAMILY_22560);
+	bool use_rx_td = (trans->trans_cfg->device_family >=
+			  IWL_DEVICE_FAMILY_AX210);
 	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
 			      sizeof(struct iwl_rb_status);
 
 	spin_lock_init(&rxq->lock);
-	if (trans->cfg->mq_rx_supported)
-		rxq->queue_size = MQ_RX_TABLE_SIZE;
+	if (trans->trans_cfg->mq_rx_supported)
+		rxq->queue_size = trans->cfg->num_rbds;
 	else
 		rxq->queue_size = RX_QUEUE_SIZE;
@@ -757,7 +788,7 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	if (!rxq->bd)
 		goto err;
 
-	if (trans->cfg->mq_rx_supported) {
+	if (trans->trans_cfg->mq_rx_supported) {
 		rxq->used_bd = dma_alloc_coherent(dev,
 						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
 						  &rxq->used_bd_dma,
@@ -784,11 +815,6 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 					     &rxq->cr_tail_dma, GFP_KERNEL);
 	if (!rxq->cr_tail)
 		goto err;
-	/*
-	 * W/A 22560 device step Z0 must be non zero bug
-	 * TODO: remove this when stop supporting Z0
-	 */
-	*rxq->cr_tail = cpu_to_le16(500);
 
 	return 0;
@@ -802,13 +828,13 @@ err:
 	return -ENOMEM;
 }
 
-int iwl_pcie_rx_alloc(struct iwl_trans *trans)
+static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i, ret;
-	size_t rb_stts_size = trans->cfg->device_family >=
-			      IWL_DEVICE_FAMILY_22560 ?
+	size_t rb_stts_size = trans->trans_cfg->device_family >=
+			      IWL_DEVICE_FAMILY_AX210 ?
 			      sizeof(__le16) : sizeof(struct iwl_rb_status);
 
 	if (WARN_ON(trans_pcie->rxq))
@@ -816,8 +842,18 @@ int iwl_pcie_rx_alloc(struct iwl_trans *trans)
 
 	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
 				  GFP_KERNEL);
-	if (!trans_pcie->rxq)
-		return -ENOMEM;
+	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+				      sizeof(trans_pcie->rx_pool[0]),
+				      GFP_KERNEL);
+	trans_pcie->global_table =
+		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
+			sizeof(trans_pcie->global_table[0]),
+			GFP_KERNEL);
+	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
+	    !trans_pcie->global_table) {
+		ret = -ENOMEM;
+		goto err;
+	}
 
 	spin_lock_init(&rba->lock);
@@ -854,6 +890,8 @@ err:
 		trans_pcie->base_rb_stts = NULL;
 		trans_pcie->base_rb_stts_dma = 0;
 	}
+	kfree(trans_pcie->rx_pool);
+	kfree(trans_pcie->global_table);
 	kfree(trans_pcie->rxq);
 
 	return ret;
@@ -1033,7 +1071,7 @@ int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
 	return 0;
 }
 
-int _iwl_pcie_rx_init(struct iwl_trans *trans)
+static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rxq *def_rxq;
@@ -1075,7 +1113,8 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
 		rxq->write = 0;
 		rxq->write_actual = 0;
 		memset(rxq->rb_stts, 0,
-		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		       (trans->trans_cfg->device_family >=
+			IWL_DEVICE_FAMILY_AX210) ?
 		       sizeof(__le16) : sizeof(struct iwl_rb_status));
 
 		iwl_pcie_rx_init_rxb_lists(rxq);
@@ -1088,13 +1127,12 @@ int _iwl_pcie_rx_init(struct iwl_trans *trans)
 	}
 
 	/* move the pool to the default queue and allocator ownerships */
-	queue_size = trans->cfg->mq_rx_supported ?
-		     MQ_RX_NUM_RBDS : RX_QUEUE_SIZE;
+	queue_size = trans->trans_cfg->mq_rx_supported ?
+			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
 	allocator_pool_size = trans->num_rx_queues *
 		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
 	num_alloc = queue_size + allocator_pool_size;
-	BUILD_BUG_ON(ARRAY_SIZE(trans_pcie->global_table) !=
-		     ARRAY_SIZE(trans_pcie->rx_pool));
+
 	for (i = 0; i < num_alloc; i++) {
 		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
@@ -1120,7 +1158,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
 	if (ret)
 		return ret;
 
-	if (trans->cfg->mq_rx_supported)
+	if (trans->trans_cfg->mq_rx_supported)
 		iwl_pcie_rx_mq_hw_init(trans);
 	else
 		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
@@ -1151,8 +1189,8 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
 	int i;
-	size_t rb_stts_size = trans->cfg->device_family >=
-			      IWL_DEVICE_FAMILY_22560 ?
+	size_t rb_stts_size = trans->trans_cfg->device_family >=
+			      IWL_DEVICE_FAMILY_AX210 ?
 			      sizeof(__le16) : sizeof(struct iwl_rb_status);
 
 	/*
@@ -1185,7 +1223,12 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 		if (rxq->napi.poll)
 			netif_napi_del(&rxq->napi);
 	}
+	kfree(trans_pcie->rx_pool);
+	kfree(trans_pcie->global_table);
 	kfree(trans_pcie->rxq);
+
+	if (trans_pcie->alloc_page)
+		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
 }
 
 static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
@@ -1243,7 +1286,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 	bool page_stolen = false;
-	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	int max_len = trans_pcie->rx_buf_bytes;
 	u32 offset = 0;
 
 	if (WARN_ON(!rxb))
@@ -1257,7 +1300,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		bool reclaim;
 		int index, cmd_index, len;
 		struct iwl_rx_cmd_buffer rxcb = {
-			._offset = offset,
+			._offset = rxb->offset + offset,
 			._rx_page_order = trans_pcie->rx_page_order,
 			._page = rxb->page,
 			._page_stolen = false,
@@ -1347,7 +1390,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		}
 
 		page_stolen |= rxcb._page_stolen;
-		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
 			break;
 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
 	}
@@ -1363,8 +1406,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	 * rx_free list for reuse later. */
 	if (rxb->page != NULL) {
 		rxb->page_dma =
-			dma_map_page(trans->dev, rxb->page, 0,
-				     PAGE_SIZE << trans_pcie->rx_page_order,
+			dma_map_page(trans->dev, rxb->page, rxb->offset,
+				     trans_pcie->rx_buf_bytes,
 				     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			/*
@@ -1392,19 +1435,18 @@ static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
 
 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
 
-	if (!trans->cfg->mq_rx_supported) {
+	if (!trans->trans_cfg->mq_rx_supported) {
 		rxb = rxq->queue[i];
 		rxq->queue[i] = NULL;
 		return rxb;
 	}
 
-	/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
-	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
-		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
+		vid = le16_to_cpu(rxq->cd[i].rbid);
 	else
-		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
 
-	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
 		goto out_err;
 
 	rxb = trans_pcie->global_table[vid - 1];
@@ -1429,6 +1471,7 @@ out_err:
 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct napi_struct *napi;
 	struct iwl_rxq *rxq;
 	u32 r, i, count = 0;
 	bool emergency = false;
@@ -1515,7 +1558,7 @@ out:
 	/* Backtrack one entry */
 	rxq->read = i;
 	/* update cr tail with the rxq read pointer */
-	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
 		*rxq->cr_tail = cpu_to_le16(r);
 	spin_unlock(&rxq->lock);
@@ -1534,8 +1577,16 @@ out:
 	if (unlikely(emergency && count))
 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
 
-	if (rxq->napi.poll)
-		napi_gro_flush(&rxq->napi, false);
+	napi = &rxq->napi;
+	if (napi->poll) {
+		napi_gro_flush(napi, false);
+
+		if (napi->rx_count) {
+			netif_receive_skb_list(&napi->rx_list);
+			INIT_LIST_HEAD(&napi->rx_list);
+			napi->rx_count = 0;
+		}
+	}
 
 	iwl_pcie_rxq_restock(trans, rxq);
 }
@@ -1597,7 +1648,7 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
 		return;
 	}
 
-	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
 		if (!trans_pcie->txq[i])
 			continue;
 		del_timer(&trans_pcie->txq[i]->stuck_timer);
@@ -1838,7 +1889,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
 		if (inta & CSR_INT_BIT_ALIVE) {
 			IWL_DEBUG_ISR(trans, "Alive interrupt\n");
 			isr_stats->alive++;
-			if (trans->cfg->gen2) {
+			if (trans->trans_cfg->gen2) {
 				/*
 				 * We can restock, since firmware configured
 				 * the RFH
@@ -2152,8 +2203,7 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 
 	/* Error detected by uCode */
 	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
-	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
-	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
+	    (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
 		IWL_ERR(trans,
 			"Microcode SW error detected. Restarting 0x%X.\n",
 			inta_fh);
@@ -2179,29 +2229,30 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
 		isr_stats->alive++;
-		if (trans->cfg->gen2) {
+		if (trans->trans_cfg->gen2) {
 			/* We can restock, since firmware configured the RFH */
 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
 		}
 	}
 
-	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560 &&
-	    inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
-		/* Reflect IML transfer status */
-		int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
+	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
+		u32 sleep_notif =
+			le32_to_cpu(trans_pcie->prph_info->sleep_notif);
+		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
+		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
+			IWL_DEBUG_ISR(trans,
+				      "Sx interrupt: sleep notification = 0x%x\n",
+				      sleep_notif);
+			trans_pcie->sx_complete = true;
+			wake_up(&trans_pcie->sx_waitq);
+		} else {
+			/* uCode wakes up after power-down sleep */
+			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
+			iwl_pcie_rxq_check_wrptr(trans);
+			iwl_pcie_txq_check_wrptrs(trans);
 
-		IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
-		if (res == IWL_IMAGE_RESP_FAIL) {
-			isr_stats->sw++;
-			iwl_pcie_irq_handle_error(trans);
+			isr_stats->wakeup++;
 		}
-	} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
-		/* uCode wakes up after power-down sleep */
-		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
-		iwl_pcie_rxq_check_wrptr(trans);
-		iwl_pcie_txq_check_wrptrs(trans);
-
-		isr_stats->wakeup++;
 	}
 
 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_IML) {
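
Note on the main functional change above: iwl_pcie_rx_alloc_page() now lets several receive buffers share one page. When the RB size (e.g. 2K) is smaller than the page-order allocation, the leftover part of a freshly allocated page is published in trans_pcie->alloc_page, and subsequent callers are handed the same page at increasing offsets, each taking an extra reference via get_page(). The sketch below is a standalone userspace analogue of that pattern, with a pthread mutex standing in for the spinlock and an atomic counter for the struct page refcount; all names in it (shared_page, rx_alloc_buf, RB_SIZE) are invented for illustration and none of it is driver code. It also folds the driver's lockless pre-check of alloc_page (the "recheck" comment in the hunk) into a single locked check for brevity.

/* Standalone analogue of the page-sharing logic in the diff above.
 * A 4 KiB "page" is handed out in RB_SIZE chunks; every holder owns
 * one reference, and the page is freed when the last reference drops.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ 4096u
#define RB_SIZE 2048u	/* e.g. a 2K receive buffer */

struct shared_page {
	atomic_int refs;	/* stands in for the struct page refcount */
	unsigned char data[PAGE_SZ];
};

static struct shared_page *partial;	/* page with space left over */
static unsigned int partial_used;	/* bytes already handed out */
static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns a page and the offset of the caller's chunk within it. */
static struct shared_page *rx_alloc_buf(unsigned int *offset)
{
	struct shared_page *page;

	pthread_mutex_lock(&alloc_lock);
	if (partial) {
		/* carve the next chunk out of the partially used page */
		page = partial;
		*offset = partial_used;
		partial_used += RB_SIZE;
		if (partial_used >= PAGE_SZ)
			partial = NULL;	/* last chunk inherits our ref */
		else
			atomic_fetch_add(&page->refs, 1); /* like get_page() */
		pthread_mutex_unlock(&alloc_lock);
		return page;
	}
	pthread_mutex_unlock(&alloc_lock);

	page = malloc(sizeof(*page));
	if (!page)
		return NULL;
	atomic_init(&page->refs, 1);
	*offset = 0;

	/* if more than one chunk fits, publish the remainder for reuse */
	if (2 * RB_SIZE <= PAGE_SZ) {
		pthread_mutex_lock(&alloc_lock);
		if (!partial) {
			atomic_fetch_add(&page->refs, 1);
			partial = page;
			partial_used = RB_SIZE;
		}
		pthread_mutex_unlock(&alloc_lock);
	}
	return page;
}

static void rx_free_buf(struct shared_page *page)
{
	/* like put_page(): free only when the last user is done */
	if (atomic_fetch_sub(&page->refs, 1) == 1)
		free(page);
}

int main(void)
{
	unsigned int off1, off2;
	struct shared_page *a = rx_alloc_buf(&off1);
	struct shared_page *b = rx_alloc_buf(&off2);

	printf("same page: %d, offsets %u and %u\n", a == b, off1, off2);
	rx_free_buf(a);
	rx_free_buf(b);
	return 0;
}

The refcount discipline matches the diff: the publisher holds one reference on behalf of the partial page, every consumer owns one, and the consumer that takes the final chunk inherits the publisher's reference, so the page is freed exactly once after all users (including stolen-page readers, in the driver's case) drop theirs.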