Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c')
 drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c | 622 +++++++++++--------
 1 file changed, 372 insertions(+), 250 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
index 222d410c586e..f6d823f012db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-tx-pcie.c
@@ -29,23 +29,30 @@
 #include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <net/mac80211.h>
-#include "iwl-agn.h"
+/* TODO: remove include to iwl-dev.h */
 #include "iwl-dev.h"
-#include "iwl-core.h"
+#include "iwl-debug.h"
+#include "iwl-csr.h"
+#include "iwl-prph.h"
 #include "iwl-io.h"
+#include "iwl-agn-hw.h"
 #include "iwl-helpers.h"
 #include "iwl-trans-int-pcie.h"
 
+#define IWL_TX_CRC_SIZE 4
+#define IWL_TX_DELIMITER_SIZE 4
+
 /**
  * iwl_trans_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
  */
-void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
+void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 				       struct iwl_tx_queue *txq,
 				       u16 byte_cnt)
 {
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
 	int write_ptr = txq->q.write_ptr;
 	int txq_id = txq->q.id;
 	u8 sec_ctl = 0;
@@ -53,6 +60,8 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
 
+	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
+
 	WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
 
 	sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
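
Annotation: the entry written into the scheduler's byte-count table is a packed 16-bit value -- the 12-bit frame length (TX byte count plus the 4-byte CRC and 4-byte delimiter added here) in the low bits, and the station id in the top four. A minimal sketch of that packing, reusing the constants this hunk introduces (the helper name is illustrative, not driver code):

```c
/* Sketch only: pack one scheduler byte-count entry the way the hunk
 * above does -- 12-bit length, 4-bit station id. */
static __le16 scd_bc_entry(u16 byte_cnt, u8 sta_id)
{
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;

	WARN_ON(len > 0xFFF);			/* must fit in 12 bits */
	return cpu_to_le16(len | (sta_id << 12));
}
```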
@@ -82,7 +91,7 @@ void iwl_trans_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
 /**
  * iwl_txq_update_write_ptr - Send new write index to hardware
  */
-void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl_txq_update_write_ptr(struct iwl_trans *trans, struct iwl_tx_queue *txq)
 {
 	u32 reg = 0;
 	int txq_id = txq->q.id;
@@ -90,28 +99,28 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 	if (txq->need_update == 0)
 		return;
 
-	if (priv->cfg->base_params->shadow_reg_enable) {
+	if (hw_params(trans).shadow_reg_enable) {
 		/* shadow register enabled */
-		iwl_write32(priv, HBUS_TARG_WRPTR,
+		iwl_write32(bus(trans), HBUS_TARG_WRPTR,
 			    txq->q.write_ptr | (txq_id << 8));
 	} else {
 		/* if we're trying to save power */
-		if (test_bit(STATUS_POWER_PMI, &priv->status)) {
+		if (test_bit(STATUS_POWER_PMI, &trans->shrd->status)) {
 			/* wake up nic if it's powered down ...
 			 * uCode will wake up, and interrupt us again, so next
 			 * time we'll skip this part. */
-			reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
+			reg = iwl_read32(bus(trans), CSR_UCODE_DRV_GP1);
 
 			if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
-				IWL_DEBUG_INFO(priv,
+				IWL_DEBUG_INFO(trans,
 					"Tx queue %d requesting wakeup,"
 					" GP1 = 0x%x\n", txq_id, reg);
-				iwl_set_bit(priv, CSR_GP_CNTRL,
+				iwl_set_bit(bus(trans), CSR_GP_CNTRL,
 					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 				return;
 			}
 
-			iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+			iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
 				     txq->q.write_ptr | (txq_id << 8));
 
 			/*
@@ -120,7 +129,7 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
 			 * trying to tx (during RFKILL, we're not trying to tx).
 			 */
 		} else
-			iwl_write32(priv, HBUS_TARG_WRPTR,
+			iwl_write32(bus(trans), HBUS_TARG_WRPTR,
 				    txq->q.write_ptr | (txq_id << 8));
 	}
 	txq->need_update = 0;
@@ -165,7 +174,7 @@ static inline u8 iwl_tfd_get_num_tbs(struct iwl_tfd *tfd)
 	return tfd->num_tbs & 0x1f;
 }
 
-static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
+static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
 			     struct iwl_tfd *tfd, enum dma_data_direction dma_dir)
 {
 	int i;
@@ -175,56 +184,56 @@ static void iwlagn_unmap_tfd(struct iwl_priv *priv, struct iwl_cmd_meta *meta,
 	num_tbs = iwl_tfd_get_num_tbs(tfd);
 
 	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Too many chunks: %i\n", num_tbs);
+		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
 		/* @todo issue fatal error, it is quite serious situation */
 		return;
 	}
 
 	/* Unmap tx_cmd */
 	if (num_tbs)
-		dma_unmap_single(priv->bus->dev,
+		dma_unmap_single(bus(trans)->dev,
 				dma_unmap_addr(meta, mapping),
 				dma_unmap_len(meta, len),
 				DMA_BIDIRECTIONAL);
 
 	/* Unmap chunks, if any. */
 	for (i = 1; i < num_tbs; i++)
-		dma_unmap_single(priv->bus->dev, iwl_tfd_tb_get_addr(tfd, i),
+		dma_unmap_single(bus(trans)->dev, iwl_tfd_tb_get_addr(tfd, i),
 				iwl_tfd_tb_get_len(tfd, i), dma_dir);
 }
 
 /**
  * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
- * @priv - driver private data
+ * @trans - transport private data
  * @txq - tx queue
  * @index - the index of the TFD to be freed
  *
  * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
-void iwlagn_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq,
+void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 	int index)
 {
 	struct iwl_tfd *tfd_tmp = txq->tfds;
-	iwlagn_unmap_tfd(priv, &txq->meta[index], &tfd_tmp[index],
+	iwlagn_unmap_tfd(trans, &txq->meta[index], &tfd_tmp[index],
 			 DMA_TO_DEVICE);
 
 	/* free SKB */
-	if (txq->txb) {
+	if (txq->skbs) {
 		struct sk_buff *skb;
 
-		skb = txq->txb[index].skb;
+		skb = txq->skbs[index];
 
 		/* can be called from irqs-disabled context */
 		if (skb) {
 			dev_kfree_skb_any(skb);
-			txq->txb[index].skb = NULL;
+			txq->skbs[index] = NULL;
 		}
 	}
 }
 
-int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
+int iwlagn_txq_attach_buf_to_tfd(struct iwl_trans *trans,
 				 struct iwl_tx_queue *txq,
 				 dma_addr_t addr, u16 len,
 				 u8 reset)
@@ -244,7 +253,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
 
 	/* Each TFD can point to a maximum 20 Tx buffers */
 	if (num_tbs >= IWL_NUM_OF_TBS) {
-		IWL_ERR(priv, "Error can not send more than %d chunks\n",
+		IWL_ERR(trans, "Error can not send more than %d chunks\n",
 			IWL_NUM_OF_TBS);
 		return -EINVAL;
 	}
@@ -253,7 +262,7 @@ int iwlagn_txq_attach_buf_to_tfd(struct iwl_priv *priv,
 		return -EINVAL;
 
 	if (unlikely(addr & ~IWL_TX_DMA_MASK))
-		IWL_ERR(priv, "Unaligned address = %llx\n",
+		IWL_ERR(trans, "Unaligned address = %llx\n",
 			(unsigned long long)addr);
 
 	iwl_tfd_set_tb(tfd, num_tbs, addr, len);
@@ -302,8 +311,7 @@ int iwl_queue_space(const struct iwl_queue *q)
 /**
  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
  */
-int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
-		   int count, int slots_num, u32 id)
+int iwl_queue_init(struct iwl_queue *q, int count, int slots_num, u32 id)
 {
 	q->n_bd = count;
 	q->n_window = slots_num;
@@ -332,16 +340,12 @@ int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
 	return 0;
 }
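
Annotation: iwlagn_txq_attach_buf_to_tfd() only records an (address, length) pair in the TFD; the caller owns the DMA mapping. A sketch of the usual pairing, built only from helpers visible in this diff (the wrapper name is illustrative):

```c
/* Illustrative wrapper: map a buffer, then attach it to the queue's
 * current TFD. 'reset' clears the TFD before adding the first buffer. */
static int attach_one_buf(struct iwl_trans *trans, struct iwl_tx_queue *txq,
			  void *buf, u16 len, u8 reset)
{
	dma_addr_t phys = dma_map_single(bus(trans)->dev, buf, len,
					 DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(bus(trans)->dev, phys)))
		return -ENOMEM;

	return iwlagn_txq_attach_buf_to_tfd(trans, txq, phys, len, reset);
}
```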
-/*TODO: this functions should NOT be exported from trans module - export it
- * until the reclaim flow will be brought to the transport module too.
- * Add a declaration to make sparse happy */
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
-				   struct iwl_tx_queue *txq);
-
-void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
+static void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 					  struct iwl_tx_queue *txq)
 {
-	struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
 	int txq_id = txq->q.id;
 	int read_ptr = txq->q.read_ptr;
 	u8 sta_id = 0;
@@ -349,7 +353,7 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
-	if (txq_id != priv->cmd_queue)
+	if (txq_id != trans->shrd->cmd_queue)
 		sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
 
 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
@@ -360,56 +364,61 @@ void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
 		tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
 }
 
-static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
+static int iwlagn_tx_queue_set_q2ratid(struct iwl_trans *trans, u16 ra_tid,
 					u16 txq_id)
 {
 	u32 tbl_dw_addr;
 	u32 tbl_dw;
 	u16 scd_q2ratid;
 
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
 
-	tbl_dw_addr = priv->scd_base_addr +
+	tbl_dw_addr = trans_pcie->scd_base_addr +
 			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
 
-	tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
+	tbl_dw = iwl_read_targ_mem(bus(trans), tbl_dw_addr);
 
 	if (txq_id & 0x1)
 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
 	else
 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
 
-	iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
+	iwl_write_targ_mem(bus(trans), tbl_dw_addr, tbl_dw);
 
 	return 0;
 }
 
-static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
+static void iwlagn_tx_queue_stop_scheduler(struct iwl_trans *trans, u16 txq_id)
 {
 	/* Simply stop the queue, but don't change any configuration;
 	 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
-	iwl_write_prph(priv,
+	iwl_write_prph(bus(trans),
 		SCD_QUEUE_STATUS_BITS(txq_id),
 		(0 << SCD_QUEUE_STTS_REG_POS_ACTIVE)|
 		(1 << SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
 }
 
-void iwl_trans_set_wr_ptrs(struct iwl_priv *priv,
+void iwl_trans_set_wr_ptrs(struct iwl_trans *trans,
 				int txq_id, u32 index)
 {
-	iwl_write_direct32(priv, HBUS_TARG_WRPTR,
+	iwl_write_direct32(bus(trans), HBUS_TARG_WRPTR,
 			(index & 0xff) | (txq_id << 8));
-	iwl_write_prph(priv, SCD_QUEUE_RDPTR(txq_id), index);
+	iwl_write_prph(bus(trans), SCD_QUEUE_RDPTR(txq_id), index);
 }
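
Annotation: iwlagn_tx_queue_set_q2ratid() packs two queues' RA/TID mappings into each 32-bit SRAM word, with odd-numbered queues in the high half-word. The read-modify-write it performs, isolated for clarity:

```c
/* Merge one 16-bit RA/TID mapping into the shared 32-bit table word;
 * mirrors the even/odd logic shown above. */
static u32 q2ratid_merge(u32 tbl_dw, u16 scd_q2ratid, u16 txq_id)
{
	if (txq_id & 0x1)
		return (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	return scd_q2ratid | (tbl_dw & 0xFFFF0000);
}
```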
"Activate" : "Deactivate", scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id); } -void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid, - int frame_limit) +static inline int get_fifo_from_tid(struct iwl_trans_pcie *trans_pcie, + u8 ctx, u16 tid) +{ + const u8 *ac_to_fifo = trans_pcie->ac_to_fifo[ctx]; + if (likely(tid < ARRAY_SIZE(tid_to_ac))) + return ac_to_fifo[tid_to_ac[tid]]; + + /* no support for TIDs 8-15 yet */ + return -EINVAL; +} + +void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, + enum iwl_rxon_context_id ctx, int sta_id, + int tid, int frame_limit) { int tx_fifo, txq_id, ssn_idx; u16 ra_tid; unsigned long flags; struct iwl_tid_data *tid_data; + struct iwl_trans_pcie *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(trans); + if (WARN_ON(sta_id == IWL_INVALID_STATION)) return; - if (WARN_ON(tid >= MAX_TID_COUNT)) + if (WARN_ON(tid >= IWL_MAX_TID_COUNT)) return; - spin_lock_irqsave(&priv->sta_lock, flags); - tid_data = &priv->stations[sta_id].tid[tid]; + tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid); + if (WARN_ON(tx_fifo < 0)) { + IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo); + return; + } + + spin_lock_irqsave(&trans->shrd->sta_lock, flags); + tid_data = &trans->shrd->tid_data[sta_id][tid]; ssn_idx = SEQ_TO_SN(tid_data->seq_number); txq_id = tid_data->agg.txq_id; - tx_fifo = tid_data->agg.tx_fifo; - spin_unlock_irqrestore(&priv->sta_lock, flags); + spin_unlock_irqrestore(&trans->shrd->sta_lock, flags); ra_tid = BUILD_RAxTID(sta_id, tid); - spin_lock_irqsave(&priv->lock, flags); + spin_lock_irqsave(&trans->shrd->lock, flags); /* Stop this Tx queue before configuring it */ - iwlagn_tx_queue_stop_scheduler(priv, txq_id); + iwlagn_tx_queue_stop_scheduler(trans, txq_id); /* Map receiver-address / traffic-ID to this queue */ - iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id); + iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id); /* Set this queue as a chain-building queue */ - iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id)); + iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id)); /* enable aggregations for the queue */ - iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id)); + iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id)); /* Place first TFD at index corresponding to start sequence number. 
+void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans,
+				 enum iwl_rxon_context_id ctx, int sta_id,
+				 int tid, int frame_limit)
 {
 	int tx_fifo, txq_id, ssn_idx;
 	u16 ra_tid;
 	unsigned long flags;
 	struct iwl_tid_data *tid_data;
 
+	struct iwl_trans_pcie *trans_pcie =
+		IWL_TRANS_GET_PCIE_TRANS(trans);
+
 	if (WARN_ON(sta_id == IWL_INVALID_STATION))
 		return;
-	if (WARN_ON(tid >= MAX_TID_COUNT))
+	if (WARN_ON(tid >= IWL_MAX_TID_COUNT))
 		return;
 
-	spin_lock_irqsave(&priv->sta_lock, flags);
-	tid_data = &priv->stations[sta_id].tid[tid];
+	tx_fifo = get_fifo_from_tid(trans_pcie, ctx, tid);
+	if (WARN_ON(tx_fifo < 0)) {
+		IWL_ERR(trans, "txq_agg_setup, bad fifo: %d\n", tx_fifo);
+		return;
+	}
+
+	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+	tid_data = &trans->shrd->tid_data[sta_id][tid];
 	ssn_idx = SEQ_TO_SN(tid_data->seq_number);
 	txq_id = tid_data->agg.txq_id;
-	tx_fifo = tid_data->agg.tx_fifo;
-	spin_unlock_irqrestore(&priv->sta_lock, flags);
+	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
 
 	ra_tid = BUILD_RAxTID(sta_id, tid);
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock_irqsave(&trans->shrd->lock, flags);
 
 	/* Stop this Tx queue before configuring it */
-	iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
 
 	/* Map receiver-address / traffic-ID to this queue */
-	iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
+	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
 
 	/* Set this queue as a chain-building queue */
-	iwl_set_bits_prph(priv, SCD_QUEUECHAIN_SEL, (1<<txq_id));
+	iwl_set_bits_prph(bus(trans), SCD_QUEUECHAIN_SEL, (1<<txq_id));
 
 	/* enable aggregations for the queue */
-	iwl_set_bits_prph(priv, SCD_AGGR_SEL, (1<<txq_id));
+	iwl_set_bits_prph(bus(trans), SCD_AGGR_SEL, (1<<txq_id));
 
 	/* Place first TFD at index corresponding to start sequence number.
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
-	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
+	trans_pcie->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
+	trans_pcie->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
+	iwl_trans_set_wr_ptrs(trans, txq_id, ssn_idx);
 
 	/* Set up Tx window size and frame limit for this queue */
-	iwl_write_targ_mem(priv, priv->scd_base_addr +
+	iwl_write_targ_mem(bus(trans), trans_pcie->scd_base_addr +
 			SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
 			sizeof(u32),
 			((frame_limit <<
@@ -475,40 +504,158 @@ void iwl_trans_txq_agg_setup(struct iwl_priv *priv, int sta_id, int tid,
 			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
 			SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
 
-	iwl_set_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
+	iwl_set_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
 
 	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
-	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
+	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
+				      tx_fifo, 1);
+
+	trans_pcie->txq[txq_id].sta_id = sta_id;
+	trans_pcie->txq[txq_id].tid = tid;
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock_irqrestore(&trans->shrd->lock, flags);
 }
 
-int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
-			      u16 ssn_idx, u8 tx_fifo)
+/*
+ * Find first available (lowest unused) Tx Queue, mark it "active".
+ * Called only when finding queue for aggregation.
+ * Should never return anything < 7, because they should already
+ * be in use as EDCA AC (0-3), Command (4), reserved (5, 6)
+ */
+static int iwlagn_txq_ctx_activate_free(struct iwl_trans *trans)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	int txq_id;
+
+	for (txq_id = 0; txq_id < hw_params(trans).max_txq_num; txq_id++)
+		if (!test_and_set_bit(txq_id,
+					&trans_pcie->txq_ctx_active_msk))
+			return txq_id;
+	return -1;
+}
+
+int iwl_trans_pcie_tx_agg_alloc(struct iwl_trans *trans,
+				enum iwl_rxon_context_id ctx, int sta_id,
+				int tid, u16 *ssn)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tid_data *tid_data;
+	unsigned long flags;
+	int txq_id;
+
+	txq_id = iwlagn_txq_ctx_activate_free(trans);
+	if (txq_id == -1) {
+		IWL_ERR(trans, "No free aggregation queue available\n");
+		return -ENXIO;
+	}
+
+	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+	tid_data = &trans->shrd->tid_data[sta_id][tid];
+	*ssn = SEQ_TO_SN(tid_data->seq_number);
+	tid_data->agg.txq_id = txq_id;
+	iwl_set_swq_id(&trans_pcie->txq[txq_id], get_ac_from_tid(tid), txq_id);
+
+	tid_data = &trans->shrd->tid_data[sta_id][tid];
+	if (tid_data->tfds_in_queue == 0) {
+		IWL_DEBUG_HT(trans, "HW queue is empty\n");
+		tid_data->agg.state = IWL_AGG_ON;
+		iwl_start_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
+	} else {
+		IWL_DEBUG_HT(trans, "HW queue is NOT empty: %d packets in HW"
+			     "queue\n", tid_data->tfds_in_queue);
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
+	}
+	spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+
+	return 0;
+}
+
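Annotation: iwlagn_txq_ctx_activate_free() doubles as the allocator -- test_and_set_bit() both checks and claims the queue's bit, so the first clear bit wins even under concurrency. A simplified, non-atomic rendering for illustration only:

```c
/* Non-atomic sketch of the lowest-free-queue scan; the driver's version
 * uses test_and_set_bit() on txq_ctx_active_msk instead. */
static int claim_free_queue(unsigned long *mask, int max_txq_num)
{
	int id;

	for (id = 0; id < max_txq_num; id++)
		if (!(*mask & (1UL << id))) {
			*mask |= 1UL << id;
			return id;
		}
	return -1;		/* all aggregation queues busy */
}
```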
+void iwl_trans_pcie_txq_agg_disable(struct iwl_trans *trans, int txq_id)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	iwlagn_tx_queue_stop_scheduler(trans, txq_id);
+
+	iwl_clear_bits_prph(bus(trans), SCD_AGGR_SEL, (1 << txq_id));
+
+	trans_pcie->txq[txq_id].q.read_ptr = 0;
+	trans_pcie->txq[txq_id].q.write_ptr = 0;
+	/* supposes that ssn_idx is valid (!= 0xFFF) */
+	iwl_trans_set_wr_ptrs(trans, txq_id, 0);
+
+	iwl_clear_bits_prph(bus(trans), SCD_INTERRUPT_MASK, (1 << txq_id));
+	iwl_txq_ctx_deactivate(trans_pcie, txq_id);
+	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id], 0, 0);
+}
+
+int iwl_trans_pcie_tx_agg_disable(struct iwl_trans *trans,
+				  enum iwl_rxon_context_id ctx, int sta_id,
+				  int tid)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+	int read_ptr, write_ptr;
+	struct iwl_tid_data *tid_data;
+	int txq_id;
+
+	spin_lock_irqsave(&trans->shrd->sta_lock, flags);
+
+	tid_data = &trans->shrd->tid_data[sta_id][tid];
+	txq_id = tid_data->agg.txq_id;
+
 	if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
 	    (IWLAGN_FIRST_AMPDU_QUEUE +
-		priv->cfg->base_params->num_of_ampdu_queues <= txq_id)) {
-		IWL_ERR(priv,
+		hw_params(trans).num_ampdu_queues <= txq_id)) {
+		IWL_ERR(trans,
 			"queue number out of range: %d, must be %d to %d\n",
 			txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
 			IWLAGN_FIRST_AMPDU_QUEUE +
-			priv->cfg->base_params->num_of_ampdu_queues - 1);
+			hw_params(trans).num_ampdu_queues - 1);
+		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
 		return -EINVAL;
 	}
 
-	iwlagn_tx_queue_stop_scheduler(priv, txq_id);
+	switch (trans->shrd->tid_data[sta_id][tid].agg.state) {
+	case IWL_EMPTYING_HW_QUEUE_ADDBA:
+		/*
+		 * This can happen if the peer stops aggregation
+		 * again before we've had a chance to drain the
+		 * queue we selected previously, i.e. before the
+		 * session was really started completely.
+		 */
+		IWL_DEBUG_HT(trans, "AGG stop before setup done\n");
+		goto turn_off;
+	case IWL_AGG_ON:
+		break;
+	default:
+		IWL_WARN(trans, "Stopping AGG while state not ON"
+			 "or starting\n");
+	}
 
-	iwl_clear_bits_prph(priv, SCD_AGGR_SEL, (1 << txq_id));
+	write_ptr = trans_pcie->txq[txq_id].q.write_ptr;
+	read_ptr = trans_pcie->txq[txq_id].q.read_ptr;
 
-	priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
-	priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
-	/* supposes that ssn_idx is valid (!= 0xFFF) */
-	iwl_trans_set_wr_ptrs(priv, txq_id, ssn_idx);
+	/* The queue is not empty */
+	if (write_ptr != read_ptr) {
+		IWL_DEBUG_HT(trans, "Stopping a non empty AGG HW QUEUE\n");
+		trans->shrd->tid_data[sta_id][tid].agg.state =
+			IWL_EMPTYING_HW_QUEUE_DELBA;
+		spin_unlock_irqrestore(&trans->shrd->sta_lock, flags);
+		return 0;
+	}
+
+	IWL_DEBUG_HT(trans, "HW queue is empty\n");
+turn_off:
+	trans->shrd->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+
+	/* do not restore/save irqs */
+	spin_unlock(&trans->shrd->sta_lock);
+	spin_lock(&trans->shrd->lock);
+
+	iwl_trans_pcie_txq_agg_disable(trans, txq_id);
+
+	spin_unlock_irqrestore(&trans->shrd->lock, flags);
 
-	iwl_clear_bits_prph(priv, SCD_INTERRUPT_MASK, (1 << txq_id));
-	iwl_txq_ctx_deactivate(priv, txq_id);
-	iwl_trans_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
+	iwl_stop_tx_ba_trans_ready(priv(trans), ctx, sta_id, tid);
 
 	return 0;
 }
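
Annotation: the DELBA path above is a small state machine: the queue is torn down immediately only if the ADDBA never completed or the HW queue has drained; otherwise it is parked in IWL_EMPTYING_HW_QUEUE_DELBA until reclaim empties it. The decision, condensed into an illustrative helper (enum names match the diff):

```c
/* Condensed decision from iwl_trans_pcie_tx_agg_disable(); not driver
 * code, just the control flow isolated. */
static bool agg_can_turn_off_now(int agg_state, int read_ptr, int write_ptr)
{
	if (agg_state == IWL_EMPTYING_HW_QUEUE_ADDBA)
		return true;		/* session never fully started */
	return read_ptr == write_ptr;	/* false -> wait out the DELBA drain */
}
```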
@@ -524,9 +671,10 @@ int iwl_trans_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
  * failed. On success, it returns the index (> 0) of command in the
  * command queue.
  */
-static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
-	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
 	struct iwl_queue *q = &txq->q;
 	struct iwl_device_cmd *out_cmd;
 	struct iwl_cmd_meta *out_meta;
@@ -544,14 +692,14 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	int trace_idx;
 #endif
 
-	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
-		IWL_WARN(priv, "fw recovery, no hcmd send\n");
+	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+		IWL_WARN(trans, "fw recovery, no hcmd send\n");
 		return -EIO;
 	}
 
-	if ((priv->ucode_owner == IWL_OWNERSHIP_TM) &&
+	if ((trans->shrd->ucode_owner == IWL_OWNERSHIP_TM) &&
 	    !(cmd->flags & CMD_ON_DEMAND)) {
-		IWL_DEBUG_HC(priv, "tm own the uCode, no regular hcmd send\n");
+		IWL_DEBUG_HC(trans, "tm own the uCode, no regular hcmd send\n");
 		return -EIO;
 	}
 
@@ -584,22 +732,22 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	if (WARN_ON(copy_size > TFD_MAX_PAYLOAD_SIZE))
 		return -EINVAL;
 
-	if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
-		IWL_WARN(priv, "Not sending command - %s KILL\n",
-			 iwl_is_rfkill(priv) ? "RF" : "CT");
+	if (iwl_is_rfkill(trans->shrd) || iwl_is_ctkill(trans->shrd)) {
+		IWL_WARN(trans, "Not sending command - %s KILL\n",
+			 iwl_is_rfkill(trans->shrd) ? "RF" : "CT");
 		return -EIO;
 	}
 
-	spin_lock_irqsave(&priv->hcmd_lock, flags);
+	spin_lock_irqsave(&trans->hcmd_lock, flags);
 
 	if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
-		spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+		spin_unlock_irqrestore(&trans->hcmd_lock, flags);
 
-		IWL_ERR(priv, "No space in command queue\n");
-		is_ct_kill = iwl_check_for_ct_kill(priv);
+		IWL_ERR(trans, "No space in command queue\n");
+		is_ct_kill = iwl_check_for_ct_kill(priv(trans));
 		if (!is_ct_kill) {
-			IWL_ERR(priv, "Restarting adapter due to queue full\n");
-			iwlagn_fw_error(priv, false);
+			IWL_ERR(trans, "Restarting adapter queue is full\n");
+			iwlagn_fw_error(priv(trans), false);
 		}
 		return -ENOSPC;
 	}
@@ -618,8 +766,9 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	out_cmd->hdr.cmd = cmd->id;
 	out_cmd->hdr.flags = 0;
-	out_cmd->hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ(priv->cmd_queue) |
-					    INDEX_TO_SEQ(q->write_ptr));
+	out_cmd->hdr.sequence =
+		cpu_to_le16(QUEUE_TO_SEQ(trans->shrd->cmd_queue) |
+			    INDEX_TO_SEQ(q->write_ptr));
 
 	/* and copy the data that needs to be copied */
 
@@ -633,16 +782,16 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 		cmd_dest += cmd->len[i];
 	}
 
-	IWL_DEBUG_HC(priv, "Sending command %s (#%x), seq: 0x%04X, "
+	IWL_DEBUG_HC(trans, "Sending command %s (#%x), seq: 0x%04X, "
 		     "%d bytes at %d[%d]:%d\n",
 		     get_cmd_string(out_cmd->hdr.cmd),
 		     out_cmd->hdr.cmd,
 		     le16_to_cpu(out_cmd->hdr.sequence), cmd_size,
-		     q->write_ptr, idx, priv->cmd_queue);
+		     q->write_ptr, idx, trans->shrd->cmd_queue);
 
-	phys_addr = dma_map_single(priv->bus->dev, &out_cmd->hdr, copy_size,
+	phys_addr = dma_map_single(bus(trans)->dev, &out_cmd->hdr, copy_size,
 				   DMA_BIDIRECTIONAL);
-	if (unlikely(dma_mapping_error(priv->bus->dev, phys_addr))) {
+	if (unlikely(dma_mapping_error(bus(trans)->dev, phys_addr))) {
 		idx = -ENOMEM;
 		goto out;
 	}
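
Annotation: out_cmd->hdr.sequence encodes both the queue and the ring slot so the completion interrupt can route the response back. The exact bit layout is not visible in this hunk; a plausible sketch (bit positions are an assumption, the real macros live elsewhere):

```c
/* Assumed layout: queue number in bits 8..12, ring index in bits 0..7. */
#define QUEUE_TO_SEQ_SKETCH(q)	(((q) & 0x1f) << 8)
#define INDEX_TO_SEQ_SKETCH(i)	((i) & 0xff)

/* usage: hdr.sequence = cpu_to_le16(QUEUE_TO_SEQ_SKETCH(cmd_queue) |
 *				     INDEX_TO_SEQ_SKETCH(q->write_ptr)); */
```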
@@ -650,7 +799,8 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	dma_unmap_addr_set(out_meta, mapping, phys_addr);
 	dma_unmap_len_set(out_meta, len, copy_size);
 
-	iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr, copy_size, 1);
+	iwlagn_txq_attach_buf_to_tfd(trans, txq,
+				     phys_addr, copy_size, 1);
 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
 	trace_bufs[0] = &out_cmd->hdr;
 	trace_lens[0] = copy_size;
@@ -662,17 +812,18 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 			continue;
 		if (!(cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY))
 			continue;
-		phys_addr = dma_map_single(priv->bus->dev, (void *)cmd->data[i],
+		phys_addr = dma_map_single(bus(trans)->dev,
+					   (void *)cmd->data[i],
 					   cmd->len[i], DMA_BIDIRECTIONAL);
-		if (dma_mapping_error(priv->bus->dev, phys_addr)) {
-			iwlagn_unmap_tfd(priv, out_meta,
+		if (dma_mapping_error(bus(trans)->dev, phys_addr)) {
+			iwlagn_unmap_tfd(trans, out_meta,
 					 &txq->tfds[q->write_ptr],
 					 DMA_BIDIRECTIONAL);
 			idx = -ENOMEM;
 			goto out;
 		}
 
-		iwlagn_txq_attach_buf_to_tfd(priv, txq, phys_addr,
+		iwlagn_txq_attach_buf_to_tfd(trans, txq, phys_addr,
 					     cmd->len[i], 0);
 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
 		trace_bufs[trace_idx] = cmd->data[i];
@@ -688,7 +839,7 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	/* check that tracing gets all possible blocks */
 	BUILD_BUG_ON(IWL_MAX_CMD_TFDS + 1 != 3);
 #ifdef CONFIG_IWLWIFI_DEVICE_TRACING
-	trace_iwlwifi_dev_hcmd(priv, cmd->flags,
+	trace_iwlwifi_dev_hcmd(priv(trans), cmd->flags,
 			       trace_bufs[0], trace_lens[0],
 			       trace_bufs[1], trace_lens[1],
 			       trace_bufs[2], trace_lens[2]);
@@ -696,10 +847,10 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 
 	/* Increment and update queue's write index */
 	q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
-	iwl_txq_update_write_ptr(priv, txq);
+	iwl_txq_update_write_ptr(trans, txq);
 
  out:
-	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
+	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
 	return idx;
 }
 
@@ -710,14 +861,16 @@ static int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
  * need to be reclaimed. As a result, some free space forms. If there is
  * enough free space (> low mark), wake the stack that feeds us.
 */
-static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
+static void iwl_hcmd_queue_reclaim(struct iwl_trans *trans, int txq_id,
+				   int idx)
 {
-	struct iwl_tx_queue *txq = &priv->txq[txq_id];
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
 	struct iwl_queue *q = &txq->q;
 	int nfreed = 0;
 
 	if ((idx >= q->n_bd) || (iwl_queue_used(q, idx) == 0)) {
-		IWL_ERR(priv, "%s: Read index for DMA queue txq id (%d), "
+		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
 			  "index %d is out of range [0-%d] %d %d.\n", __func__,
 			  txq_id, idx, q->n_bd, q->write_ptr, q->read_ptr);
 		return;
 	}
 
@@ -727,9 +880,9 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
 	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
 		if (nfreed++ > 0) {
-			IWL_ERR(priv, "HCMD skipped: index (%d) %d %d\n", idx,
+			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", idx,
 					q->write_ptr, q->read_ptr);
-			iwlagn_fw_error(priv, false);
+			iwlagn_fw_error(priv(trans), false);
 		}
 
 	}
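
Annotation: the reclaim loop depends on iwl_queue_inc_wrap() stepping read_ptr around the ring; its body is not part of this diff, but the behavior it needs is the standard wrap-around increment:

```c
/* Assumed behavior of iwl_queue_inc_wrap(): advance one slot, wrapping
 * at n_bd (the number of buffer descriptors in the ring). */
static int queue_inc_wrap_sketch(int index, int n_bd)
{
	return (index + 1 < n_bd) ? index + 1 : 0;
}
```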
@@ -743,7 +896,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id, int idx)
  * will be executed.  The attached skb (if present) will only be freed
  * if the callback returns 1
  */
-void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
+void iwl_tx_cmd_complete(struct iwl_trans *trans, struct iwl_rx_mem_buffer *rxb)
 {
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -752,18 +905,19 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 	int cmd_index;
 	struct iwl_device_cmd *cmd;
 	struct iwl_cmd_meta *meta;
-	struct iwl_tx_queue *txq = &priv->txq[priv->cmd_queue];
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[trans->shrd->cmd_queue];
 	unsigned long flags;
 
 	/* If a Tx command is being handled and it isn't in the actual
 	 * command queue then a command routing bug has been introduced
 	 * in the queue management code. */
-	if (WARN(txq_id != priv->cmd_queue,
+	if (WARN(txq_id != trans->shrd->cmd_queue,
 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
-		  txq_id, priv->cmd_queue, sequence,
-		  priv->txq[priv->cmd_queue].q.read_ptr,
-		  priv->txq[priv->cmd_queue].q.write_ptr)) {
-		iwl_print_hex_error(priv, pkt, 32);
+		  txq_id, trans->shrd->cmd_queue, sequence,
+		  trans_pcie->txq[trans->shrd->cmd_queue].q.read_ptr,
+		  trans_pcie->txq[trans->shrd->cmd_queue].q.write_ptr)) {
+		iwl_print_hex_error(trans, pkt, 32);
 		return;
 	}
 
@@ -773,121 +927,40 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
 
 	txq->time_stamp = jiffies;
 
-	iwlagn_unmap_tfd(priv, meta, &txq->tfds[index], DMA_BIDIRECTIONAL);
+	iwlagn_unmap_tfd(trans, meta, &txq->tfds[index],
+			 DMA_BIDIRECTIONAL);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
 		meta->source->reply_page = (unsigned long)rxb_addr(rxb);
 		rxb->page = NULL;
 	} else if (meta->callback)
-		meta->callback(priv, cmd, pkt);
+		meta->callback(trans->shrd, cmd, pkt);
 
-	spin_lock_irqsave(&priv->hcmd_lock, flags);
+	spin_lock_irqsave(&trans->hcmd_lock, flags);
 
-	iwl_hcmd_queue_reclaim(priv, txq_id, index);
+	iwl_hcmd_queue_reclaim(trans, txq_id, index);
 
 	if (!(meta->flags & CMD_ASYNC)) {
-		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-		IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
 			       get_cmd_string(cmd->hdr.cmd));
-		wake_up_interruptible(&priv->wait_command_queue);
+		wake_up_interruptible(&trans->shrd->wait_command_queue);
 	}
 
 	meta->flags = 0;
 
-	spin_unlock_irqrestore(&priv->hcmd_lock, flags);
-}
-
-const char *get_cmd_string(u8 cmd)
-{
-	switch (cmd) {
-		IWL_CMD(REPLY_ALIVE);
-		IWL_CMD(REPLY_ERROR);
-		IWL_CMD(REPLY_RXON);
-		IWL_CMD(REPLY_RXON_ASSOC);
-		IWL_CMD(REPLY_QOS_PARAM);
-		IWL_CMD(REPLY_RXON_TIMING);
-		IWL_CMD(REPLY_ADD_STA);
-		IWL_CMD(REPLY_REMOVE_STA);
-		IWL_CMD(REPLY_REMOVE_ALL_STA);
-		IWL_CMD(REPLY_TXFIFO_FLUSH);
-		IWL_CMD(REPLY_WEPKEY);
-		IWL_CMD(REPLY_TX);
-		IWL_CMD(REPLY_LEDS_CMD);
-		IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
-		IWL_CMD(COEX_PRIORITY_TABLE_CMD);
-		IWL_CMD(COEX_MEDIUM_NOTIFICATION);
-		IWL_CMD(COEX_EVENT_CMD);
-		IWL_CMD(REPLY_QUIET_CMD);
-		IWL_CMD(REPLY_CHANNEL_SWITCH);
-		IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
-		IWL_CMD(REPLY_SPECTRUM_MEASUREMENT_CMD);
-		IWL_CMD(SPECTRUM_MEASURE_NOTIFICATION);
-		IWL_CMD(POWER_TABLE_CMD);
-		IWL_CMD(PM_SLEEP_NOTIFICATION);
-		IWL_CMD(PM_DEBUG_STATISTIC_NOTIFIC);
-		IWL_CMD(REPLY_SCAN_CMD);
-		IWL_CMD(REPLY_SCAN_ABORT_CMD);
-		IWL_CMD(SCAN_START_NOTIFICATION);
-		IWL_CMD(SCAN_RESULTS_NOTIFICATION);
-		IWL_CMD(SCAN_COMPLETE_NOTIFICATION);
-		IWL_CMD(BEACON_NOTIFICATION);
-		IWL_CMD(REPLY_TX_BEACON);
-		IWL_CMD(WHO_IS_AWAKE_NOTIFICATION);
-		IWL_CMD(QUIET_NOTIFICATION);
-		IWL_CMD(REPLY_TX_PWR_TABLE_CMD);
-		IWL_CMD(MEASURE_ABORT_NOTIFICATION);
-		IWL_CMD(REPLY_BT_CONFIG);
-		IWL_CMD(REPLY_STATISTICS_CMD);
-		IWL_CMD(STATISTICS_NOTIFICATION);
-		IWL_CMD(REPLY_CARD_STATE_CMD);
-		IWL_CMD(CARD_STATE_NOTIFICATION);
-		IWL_CMD(MISSED_BEACONS_NOTIFICATION);
-		IWL_CMD(REPLY_CT_KILL_CONFIG_CMD);
-		IWL_CMD(SENSITIVITY_CMD);
-		IWL_CMD(REPLY_PHY_CALIBRATION_CMD);
-		IWL_CMD(REPLY_RX_PHY_CMD);
-		IWL_CMD(REPLY_RX_MPDU_CMD);
-		IWL_CMD(REPLY_RX);
-		IWL_CMD(REPLY_COMPRESSED_BA);
-		IWL_CMD(CALIBRATION_CFG_CMD);
-		IWL_CMD(CALIBRATION_RES_NOTIFICATION);
-		IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
-		IWL_CMD(REPLY_TX_POWER_DBM_CMD);
-		IWL_CMD(TEMPERATURE_NOTIFICATION);
-		IWL_CMD(TX_ANT_CONFIGURATION_CMD);
-		IWL_CMD(REPLY_BT_COEX_PROFILE_NOTIF);
-		IWL_CMD(REPLY_BT_COEX_PRIO_TABLE);
-		IWL_CMD(REPLY_BT_COEX_PROT_ENV);
-		IWL_CMD(REPLY_WIPAN_PARAMS);
-		IWL_CMD(REPLY_WIPAN_RXON);
-		IWL_CMD(REPLY_WIPAN_RXON_TIMING);
-		IWL_CMD(REPLY_WIPAN_RXON_ASSOC);
-		IWL_CMD(REPLY_WIPAN_QOS_PARAM);
-		IWL_CMD(REPLY_WIPAN_WEPKEY);
-		IWL_CMD(REPLY_WIPAN_P2P_CHANNEL_SWITCH);
-		IWL_CMD(REPLY_WIPAN_NOA_NOTIFICATION);
-		IWL_CMD(REPLY_WIPAN_DEACTIVATION_COMPLETE);
-		IWL_CMD(REPLY_WOWLAN_PATTERNS);
-		IWL_CMD(REPLY_WOWLAN_WAKEUP_FILTER);
-		IWL_CMD(REPLY_WOWLAN_TSC_RSC_PARAMS);
-		IWL_CMD(REPLY_WOWLAN_TKIP_PARAMS);
-		IWL_CMD(REPLY_WOWLAN_KEK_KCK_MATERIAL);
-		IWL_CMD(REPLY_WOWLAN_GET_STATUS);
-	default:
-		return "UNKNOWN";
-
-	}
+	spin_unlock_irqrestore(&trans->hcmd_lock, flags);
 }
 
 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
 
-static void iwl_generic_cmd_callback(struct iwl_priv *priv,
+static void iwl_generic_cmd_callback(struct iwl_shared *shrd,
 				     struct iwl_device_cmd *cmd,
 				     struct iwl_rx_packet *pkt)
 {
 	if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
-		IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
+		IWL_ERR(shrd->trans, "Bad return from %s (0x%08X)\n",
 			get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
 		return;
 	}
 
@@ -896,17 +969,17 @@ static void iwl_generic_cmd_callback(struct iwl_priv *priv,
 	switch (cmd->hdr.cmd) {
 	case REPLY_TX_LINK_QUALITY_CMD:
 	case SENSITIVITY_CMD:
-		IWL_DEBUG_HC_DUMP(priv, "back from %s (0x%08X)\n",
+		IWL_DEBUG_HC_DUMP(shrd->trans, "back from %s (0x%08X)\n",
 				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
 		break;
 	default:
-		IWL_DEBUG_HC(priv, "back from %s (0x%08X)\n",
+		IWL_DEBUG_HC(shrd->trans, "back from %s (0x%08X)\n",
 				get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
 	}
 #endif
 }
 
-static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_send_cmd_async(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	int ret;
 
@@ -918,77 +991,78 @@ static int iwl_send_cmd_async(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
 	if (!cmd->callback)
 		cmd->callback = iwl_generic_cmd_callback;
 
-	if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+	if (test_bit(STATUS_EXIT_PENDING, &trans->shrd->status))
 		return -EBUSY;
 
-	ret = iwl_enqueue_hcmd(priv, cmd);
+	ret = iwl_enqueue_hcmd(trans, cmd);
 	if (ret < 0) {
-		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
 			  get_cmd_string(cmd->id), ret);
 		return ret;
 	}
 	return 0;
 }
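
Annotation: the deleted get_cmd_string() (moved out of this file by the refactor) is generated by a stringify macro. Its definition is not shown in this hunk; the conventional form is:

```c
/* Conventional stringify helper behind the IWL_CMD(x) lines above
 * (assumed here, since the macro's definition is not in this diff): */
#define IWL_CMD(x) case x: return #x

/* e.g. IWL_CMD(REPLY_ALIVE) expands to:
 *	case REPLY_ALIVE: return "REPLY_ALIVE";
 */
```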
-static int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+static int iwl_send_cmd_sync(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int cmd_idx;
 	int ret;
 
-	lockdep_assert_held(&priv->mutex);
+	lockdep_assert_held(&trans->shrd->mutex);
 
 	/* A synchronous command can not have a callback set. */
 	if (WARN_ON(cmd->callback))
 		return -EINVAL;
 
-	IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
+	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
 			get_cmd_string(cmd->id));
 
-	set_bit(STATUS_HCMD_ACTIVE, &priv->status);
-	IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
+	set_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
 			get_cmd_string(cmd->id));
 
-	cmd_idx = iwl_enqueue_hcmd(priv, cmd);
+	cmd_idx = iwl_enqueue_hcmd(trans, cmd);
 	if (cmd_idx < 0) {
 		ret = cmd_idx;
-		clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-		IWL_ERR(priv, "Error sending %s: enqueue_hcmd failed: %d\n",
+		clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
 			get_cmd_string(cmd->id), ret);
 		return ret;
 	}
 
-	ret = wait_event_interruptible_timeout(priv->wait_command_queue,
-			!test_bit(STATUS_HCMD_ACTIVE, &priv->status),
+	ret = wait_event_interruptible_timeout(trans->shrd->wait_command_queue,
+			!test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status),
 			HOST_COMPLETE_TIMEOUT);
 	if (!ret) {
-		if (test_bit(STATUS_HCMD_ACTIVE, &priv->status)) {
-			IWL_ERR(priv,
+		if (test_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status)) {
+			IWL_ERR(trans,
 				"Error sending %s: time out after %dms.\n",
 				get_cmd_string(cmd->id),
 				jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
 
-			clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
-			IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command"
+			clear_bit(STATUS_HCMD_ACTIVE, &trans->shrd->status);
+			IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command"
 				       "%s\n", get_cmd_string(cmd->id));
 			ret = -ETIMEDOUT;
 			goto cancel;
 		}
 	}
 
-	if (test_bit(STATUS_RF_KILL_HW, &priv->status)) {
-		IWL_ERR(priv, "Command %s aborted: RF KILL Switch\n",
+	if (test_bit(STATUS_RF_KILL_HW, &trans->shrd->status)) {
+		IWL_ERR(trans, "Command %s aborted: RF KILL Switch\n",
 			       get_cmd_string(cmd->id));
 		ret = -ECANCELED;
 		goto fail;
 	}
-	if (test_bit(STATUS_FW_ERROR, &priv->status)) {
-		IWL_ERR(priv, "Command %s failed: FW Error\n",
+	if (test_bit(STATUS_FW_ERROR, &trans->shrd->status)) {
+		IWL_ERR(trans, "Command %s failed: FW Error\n",
 			       get_cmd_string(cmd->id));
 		ret = -EIO;
 		goto fail;
 	}
 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
-		IWL_ERR(priv, "Error: Response NULL in '%s'\n",
+		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
 			  get_cmd_string(cmd->id));
 		ret = -EIO;
 		goto cancel;
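
Annotation: the synchronous path is flag-enqueue-sleep -- STATUS_HCMD_ACTIVE is set before enqueue, cleared by iwl_tx_cmd_complete(), and a flag that survives HOST_COMPLETE_TIMEOUT means the reply was lost. The distilled shape (error paths other than the timeout omitted; not a drop-in replacement):

```c
/* Distilled from iwl_send_cmd_sync() above. */
set_bit(STATUS_HCMD_ACTIVE, &status);
enqueue(cmd);					/* iwl_enqueue_hcmd() */
if (!wait_event_interruptible_timeout(waitq,
			!test_bit(STATUS_HCMD_ACTIVE, &status),
			HOST_COMPLETE_TIMEOUT) &&
    test_bit(STATUS_HCMD_ACTIVE, &status)) {
	clear_bit(STATUS_HCMD_ACTIVE, &status);	/* give up on the reply */
	ret = -ETIMEDOUT;
}
```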
@@ -1004,28 +1078,28 @@ cancel:
 		 * in later, it will possibly set an invalid
 		 * address (cmd->meta.source).
 		 */
-		priv->txq[priv->cmd_queue].meta[cmd_idx].flags &=
+		trans_pcie->txq[trans->shrd->cmd_queue].meta[cmd_idx].flags &=
 							~CMD_WANT_SKB;
 	}
 fail:
 	if (cmd->reply_page) {
-		iwl_free_pages(priv, cmd->reply_page);
+		iwl_free_pages(trans->shrd, cmd->reply_page);
 		cmd->reply_page = 0;
 	}
 
 	return ret;
 }
 
-int iwl_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
+int iwl_trans_pcie_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
 {
 	if (cmd->flags & CMD_ASYNC)
-		return iwl_send_cmd_async(priv, cmd);
+		return iwl_send_cmd_async(trans, cmd);
 
-	return iwl_send_cmd_sync(priv, cmd);
+	return iwl_send_cmd_sync(trans, cmd);
 }
 
-int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
-		     const void *data)
+int iwl_trans_pcie_send_cmd_pdu(struct iwl_trans *trans, u8 id, u32 flags,
+				u16 len, const void *data)
 {
 	struct iwl_host_cmd cmd = {
 		.id = id,
@@ -1034,5 +1108,53 @@ int iwl_send_cmd_pdu(struct iwl_priv *priv, u8 id, u32 flags, u16 len,
 		.flags = flags,
 	};
 
-	return iwl_send_cmd(priv, &cmd);
+	return iwl_trans_pcie_send_cmd(trans, &cmd);
+}
+
+/* Frees buffers until index _not_ inclusive */
+int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
+			 struct sk_buff_head *skbs)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_tx_queue *txq = &trans_pcie->txq[txq_id];
+	struct iwl_queue *q = &txq->q;
+	int last_to_free;
+	int freed = 0;
+
+	/* Since we free until index _not_ inclusive, the one before index is
+	 * the last we will free. This one must be used */
+	last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+
+	if ((index >= q->n_bd) ||
+	   (iwl_queue_used(q, last_to_free) == 0)) {
+		IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), "
+			  "last_to_free %d is out of range [0-%d] %d %d.\n",
+			  __func__, txq_id, last_to_free, q->n_bd,
+			  q->write_ptr, q->read_ptr);
+		return 0;
+	}
+
+	IWL_DEBUG_TX_REPLY(trans, "reclaim: [%d, %d, %d]\n", txq_id,
+			   q->read_ptr, index);
+
+	if (WARN_ON(!skb_queue_empty(skbs)))
+		return 0;
+
+	for (;
+	     q->read_ptr != index;
+	     q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
+
+		if (WARN_ON_ONCE(txq->skbs[txq->q.read_ptr] == NULL))
+			continue;
+
+		__skb_queue_tail(skbs, txq->skbs[txq->q.read_ptr]);
+
+		txq->skbs[txq->q.read_ptr] = NULL;
+
+		iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
+
+		iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr);
+		freed++;
+	}
+	return freed;
+}
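
Annotation: a hypothetical caller of the renamed PDU helper, to show the wrapper's shape; REPLY_BT_CONFIG and struct iwl_bt_cmd stand in for any fixed-size host command, and CMD_SYNC selects the blocking path:

```c
/* Hypothetical usage sketch of iwl_trans_pcie_send_cmd_pdu(). */
struct iwl_bt_cmd bt_cmd = { /* ... filled by the caller ... */ };
int ret = iwl_trans_pcie_send_cmd_pdu(trans, REPLY_BT_CONFIG, CMD_SYNC,
				      sizeof(bt_cmd), &bt_cmd);
```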

