Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/pcie/tx.c')
-rw-r--r--	drivers/net/wireless/intel/iwlwifi/pcie/tx.c	258
1 file changed, 123 insertions, 135 deletions
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 2f0ba7ef53b8..2f69a6469fe7 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -65,7 +65,6 @@
 #include <linux/ieee80211.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
-#include <linux/pm_runtime.h>
 #include <net/ip6_checksum.h>
 #include <net/tso.h>
@@ -114,17 +113,17 @@ int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
 	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
 	 * to reserve any queue entries for this purpose.
 	 */
-	if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
+	if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size)
 		max = q->n_window;
 	else
-		max = trans->cfg->base_params->max_tfd_queue_size - 1;
+		max = trans->trans_cfg->base_params->max_tfd_queue_size - 1;
 
 	/*
 	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
 	 * modulo by max_tfd_queue_size and is well defined.
 	 */
 	used = (q->write_ptr - q->read_ptr) &
-		(trans->cfg->base_params->max_tfd_queue_size - 1);
+		(trans->trans_cfg->base_params->max_tfd_queue_size - 1);
 
 	if (WARN_ON(used > max))
 		return 0;
@@ -214,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 	u8 sec_ctl = 0;
 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
 	__le16 bc_ent;
-	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[txq->write_ptr].cmd->payload;
+	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 	u8 sta_id = tx_cmd->sta_id;
 
 	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
@@ -258,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
 	int read_ptr = txq->read_ptr;
 	u8 sta_id = 0;
 	__le16 bc_ent;
-	struct iwl_tx_cmd *tx_cmd =
-		(void *)txq->entries[read_ptr].cmd->payload;
+	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
+	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 
 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
 
@@ -293,7 +292,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
 	 * 2. NIC is woken up for CMD regardless of shadow outside this function
 	 * 3. there is a chance that the NIC is asleep
 	 */
-	if (!trans->cfg->base_params->shadow_reg_enable &&
+	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
 	    txq_id != trans_pcie->cmd_queue &&
 	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
 		/*
@@ -307,7 +306,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
 			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
 				       txq_id, reg);
 			iwl_set_bit(trans, CSR_GP_CNTRL,
-				    BIT(trans->cfg->csr->flag_mac_access_req));
+				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			txq->need_update = true;
 			return;
 		}
@@ -328,7 +327,7 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i;
 
-	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
+	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
 		struct iwl_txq *txq = trans_pcie->txq[i];
 
 		if (!test_bit(i, trans_pcie->queue_used))
@@ -347,7 +346,7 @@ static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
 						  void *_tfd, u8 idx)
 {
 
-	if (trans->cfg->use_tfh) {
+	if (trans->trans_cfg->use_tfh) {
 		struct iwl_tfh_tfd *tfd = _tfd;
 		struct iwl_tfh_tb *tb = &tfd->tbs[idx];
@@ -390,7 +389,7 @@ static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
 
 static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
 {
-	if (trans->cfg->use_tfh) {
+	if (trans->trans_cfg->use_tfh) {
 		struct iwl_tfh_tfd *tfd = _tfd;
 
 		return le16_to_cpu(tfd->num_tbs) & 0x1f;
@@ -437,7 +436,7 @@ static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
 
 	meta->tbs = 0;
 
-	if (trans->cfg->use_tfh) {
+	if (trans->trans_cfg->use_tfh) {
 		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
 
 		tfd_fh->num_tbs = 0;
@@ -525,14 +524,14 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	size_t tfd_sz = trans_pcie->tfd_size *
-		trans->cfg->base_params->max_tfd_queue_size;
+		trans->trans_cfg->base_params->max_tfd_queue_size;
 	size_t tb0_buf_sz;
 	int i;
 
 	if (WARN_ON(txq->entries || txq->tfds))
 		return -EINVAL;
 
-	if (trans->cfg->use_tfh)
+	if (trans->trans_cfg->use_tfh)
 		tfd_sz = trans_pcie->tfd_size * slots_num;
 
 	timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
@@ -591,7 +590,8 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
 		      int slots_num, bool cmd_queue)
 {
 	int ret;
-	u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
+	u32 tfd_queue_max_size =
+		trans->trans_cfg->base_params->max_tfd_queue_size;
 
 	txq->need_update = false;
 
@@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
 			    struct sk_buff *skb)
 {
 	struct page **page_ptr;
+	struct page *next;
 
 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+	next = *page_ptr;
+	*page_ptr = NULL;
+
+	while (next) {
+		struct page *tmp = next;
 
-	if (*page_ptr) {
-		__free_page(*page_ptr);
-		*page_ptr = NULL;
+		next = *(void **)(page_address(next) + PAGE_SIZE -
+				  sizeof(void *));
+		__free_page(tmp);
 	}
 }
 
@@ -639,20 +645,14 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
 
 	lockdep_assert_held(&trans_pcie->reg_lock);
 
-	if (trans_pcie->ref_cmd_in_flight) {
-		trans_pcie->ref_cmd_in_flight = false;
-		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
-		iwl_trans_unref(trans);
-	}
-
-	if (!trans->cfg->base_params->apmg_wake_up_wa)
+	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
 		return;
 
 	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
 		return;
 
 	trans_pcie->cmd_hold_nic_awake = false;
 	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-				   BIT(trans->cfg->csr->flag_mac_access_req));
+				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 }
 
@@ -683,13 +683,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
 			unsigned long flags;
 
 			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
-			if (txq_id != trans_pcie->cmd_queue) {
-				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
-					      txq->id);
-				iwl_trans_unref(trans);
-			} else {
+			if (txq_id == trans_pcie->cmd_queue)
 				iwl_pcie_clear_cmd_in_flight(trans);
-			}
 			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
 		}
 	}
@@ -737,7 +732,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
 	if (txq->tfds) {
 		dma_free_coherent(dev,
 				  trans_pcie->tfd_size *
-				  trans->cfg->base_params->max_tfd_queue_size,
+				  trans->trans_cfg->base_params->max_tfd_queue_size,
 				  txq->tfds, txq->dma_addr);
 		txq->dma_addr = 0;
 		txq->tfds = NULL;
@@ -759,7 +754,7 @@
 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int nq = trans->cfg->base_params->num_of_queues;
+	int nq = trans->trans_cfg->base_params->num_of_queues;
 	int chan;
 	u32 reg_val;
 	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
@@ -786,7 +781,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 	/* The chain extension of the SCD doesn't work well. This feature is
 	 * enabled by default by the HW, so we need to disable it manually.
 	 */
-	if (trans->cfg->base_params->scd_chain_ext_wa)
+	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
 		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
 
 	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
@@ -808,7 +803,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
 
 	/* Enable L1-Active */
-	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
+	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
 		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
 				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
 }
@@ -822,13 +817,13 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
 	 * we should never get here in gen2 trans mode return early to avoid
 	 * having invalid accesses
 	 */
-	if (WARN_ON_ONCE(trans->cfg->gen2))
+	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
 		return;
 
-	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
 	     txq_id++) {
 		struct iwl_txq *txq = trans_pcie->txq[txq_id];
-		if (trans->cfg->use_tfh)
+		if (trans->trans_cfg->use_tfh)
 			iwl_write_direct64(trans,
 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
 					   txq->dma_addr);
@@ -911,7 +906,7 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
 		return 0;
 
 	/* Unmap DMA from host system and free skb's */
-	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
 	     txq_id++)
 		iwl_pcie_txq_unmap(trans, txq_id);
 
@@ -933,7 +928,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans)
 	/* Tx queues */
 	if (trans_pcie->txq_memory) {
 		for (txq_id = 0;
-		     txq_id < trans->cfg->base_params->num_of_queues;
+		     txq_id < trans->trans_cfg->base_params->num_of_queues;
 		     txq_id++) {
 			iwl_pcie_txq_free(trans, txq_id);
 			trans_pcie->txq[txq_id] = NULL;
@@ -957,9 +952,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	int ret;
 	int txq_id, slots_num;
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
+	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
 
-	bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+	bc_tbls_size *= (trans->trans_cfg->device_family >=
+			 IWL_DEVICE_FAMILY_AX210) ?
 		sizeof(struct iwl_gen3_bc_tbl) :
 		sizeof(struct iwlagn_scd_bc_tbl);
 
@@ -984,8 +980,9 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 		goto error;
 	}
 
-	trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
-					 sizeof(struct iwl_txq), GFP_KERNEL);
+	trans_pcie->txq_memory =
+		kcalloc(trans->trans_cfg->base_params->num_of_queues,
+			sizeof(struct iwl_txq), GFP_KERNEL);
 	if (!trans_pcie->txq_memory) {
 		IWL_ERR(trans, "Not enough memory for txq\n");
 		ret = -ENOMEM;
@@ -993,7 +990,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
 	}
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
 	     txq_id++) {
 		bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
 
@@ -1047,7 +1044,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 	spin_unlock(&trans_pcie->irq_lock);
 
 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
-	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
+	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
 	     txq_id++) {
 		bool cmd_queue = (txq_id == trans_pcie->cmd_queue);
 
@@ -1075,7 +1072,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
 	}
 
 	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
-	if (trans->cfg->base_params->num_of_queues > 20)
+	if (trans->trans_cfg->base_params->num_of_queues > 20)
 		iwl_set_bits_prph(trans, SCD_GP_CTRL,
 				  SCD_GP_CTRL_ENABLE_31_QUEUES);
 
@@ -1147,7 +1144,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		IWL_ERR(trans,
 			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
 			__func__, txq_id, last_to_free,
-			trans->cfg->base_params->max_tfd_queue_size,
+			trans->trans_cfg->base_params->max_tfd_queue_size,
 			txq->write_ptr, txq->read_ptr);
 		goto out;
 	}
@@ -1170,7 +1167,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		txq->entries[read_ptr].skb = NULL;
 
-		if (!trans->cfg->use_tfh)
+		if (!trans->trans_cfg->use_tfh)
 			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
 		iwl_pcie_txq_free_tfd(trans, txq);
@@ -1205,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 
 		while (!skb_queue_empty(&overflow_skbs)) {
 			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
-			struct iwl_device_cmd *dev_cmd_ptr;
+			struct iwl_device_tx_cmd *dev_cmd_ptr;
 
 			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
@@ -1225,20 +1222,28 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
 		txq->overflow_tx = false;
 	}
 
-	if (txq->read_ptr == txq->write_ptr) {
-		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
-		iwl_trans_unref(trans);
-	}
-
 out:
 	spin_unlock_bh(&txq->lock);
 }
 
+/* Set wr_ptr of specific device and txq  */
+void iwl_trans_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_txq *txq = trans_pcie->txq[txq_id];
+
+	spin_lock_bh(&txq->lock);
+
+	txq->write_ptr = ptr;
+	txq->read_ptr = txq->write_ptr;
+
+	spin_unlock_bh(&txq->lock);
+}
+
 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 				      const struct iwl_host_cmd *cmd)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	const struct iwl_cfg *cfg = trans->cfg;
 	int ret;
 
 	lockdep_assert_held(&trans_pcie->reg_lock);
@@ -1247,32 +1252,25 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
 	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
 		return -ENODEV;
 
-	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
-	    !trans_pcie->ref_cmd_in_flight) {
-		trans_pcie->ref_cmd_in_flight = true;
-		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
-		iwl_trans_ref(trans);
-	}
-
 	/*
 	 * wake up the NIC to make sure that the firmware will see the host
 	 * command - we will let the NIC sleep once all the host commands
 	 * returned. This needs to be done only on NICs that have
 	 * apmg_wake_up_wa set.
 	 */
-	if (cfg->base_params->apmg_wake_up_wa &&
+	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
 	    !trans_pcie->cmd_hold_nic_awake) {
 		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
-					 BIT(cfg->csr->flag_mac_access_req));
+					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 
 		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
-				   BIT(cfg->csr->flag_val_mac_access_en),
-				   (BIT(cfg->csr->flag_mac_clock_ready) |
+				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
 				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
 				   15000);
 		if (ret < 0) {
 			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-					BIT(cfg->csr->flag_mac_access_req));
+					CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
 			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
 			return -EIO;
 		}
@@ -1302,12 +1300,12 @@ void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
 	idx = iwl_pcie_get_cmd_index(txq, idx);
 	r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
 
-	if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
+	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
 	    (!iwl_queue_used(txq, idx))) {
 		WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used),
 			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
 			  __func__, txq_id, idx,
-			  trans->cfg->base_params->max_tfd_queue_size,
+			  trans->trans_cfg->base_params->max_tfd_queue_size,
 			  txq->write_ptr, txq->read_ptr);
 		return;
 	}
@@ -1421,7 +1419,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
 		 * this sad hardware issue.
 		 * This bug has been fixed on devices 9000 and up.
 		 */
-		scd_bug = !trans->cfg->mq_rx_supported &&
+		scd_bug = !trans->trans_cfg->mq_rx_supported &&
 			!((ssn - txq->write_ptr) & 0x3f) &&
 			(ssn != txq->write_ptr);
 		if (scd_bug)
@@ -1867,20 +1865,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 		wake_up(&trans_pcie->wait_command_queue);
 	}
 
-	if (meta->flags & CMD_MAKE_TRANS_IDLE) {
-		IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
-			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
-		set_bit(STATUS_TRANS_IDLE, &trans->status);
-		wake_up(&trans_pcie->d0i3_waitq);
-	}
-
-	if (meta->flags & CMD_WAKE_UP_TRANS) {
-		IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
-			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
-		clear_bit(STATUS_TRANS_IDLE, &trans->status);
-		wake_up(&trans_pcie->d0i3_waitq);
-	}
-
 	meta->flags = 0;
 
 	spin_unlock_bh(&txq->lock);
@@ -1927,16 +1911,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
 		       iwl_get_cmd_string(trans, cmd->id));
 
-	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
-		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
-				 pm_runtime_active(&trans_pcie->pci_dev->dev),
-				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
-		if (!ret) {
-			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
-			return -ETIMEDOUT;
-		}
-	}
-
 	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
 	if (cmd_idx < 0) {
 		ret = cmd_idx;
@@ -2051,9 +2025,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 					 head_tb_len, DMA_TO_DEVICE);
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 			return -EINVAL;
-		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
-					skb->data + hdr_len,
-					head_tb_len);
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
+					tb_phys, head_tb_len);
 		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
 	}
 
@@ -2071,9 +2044,8 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 
 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
 			return -EINVAL;
-		trace_iwlwifi_dev_tx_tb(trans->dev, skb,
-					skb_frag_address(frag),
-					skb_frag_size(frag));
+		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
+					tb_phys, skb_frag_size(frag));
 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
 						skb_frag_size(frag), false);
 		if (tb_idx < 0)
@@ -2086,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
 }
 
 #ifdef CONFIG_INET
-struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
+struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
+				      struct sk_buff *skb)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
+	struct page **page_ptr;
+
+	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
+
+	if (WARN_ON(*page_ptr))
+		return NULL;
 
 	if (!p->page)
 		goto alloc;
 
-	/* enough room on this page */
-	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
-		return p;
+	/*
+	 * Check if there's enough room on this page
	 *
+	 * Note that we put a page chaining pointer *last* in the
+	 * page - we need it somewhere, and if it's there then we
+	 * avoid DMA mapping the last bits of the page which may
+	 * trigger the 32-bit boundary hardware bug.
+	 *
+	 * (see also get_workaround_page() in tx-gen2.c)
+	 */
+	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE -
+			   sizeof(void *))
+		goto out;
 
 	/* We don't have enough room on this page, get a new one. */
 	__free_page(p->page);
@@ -2106,6 +2095,11 @@ alloc:
 	if (!p->page)
 		return NULL;
 	p->pos = page_address(p->page);
+	/* set the chaining pointer to NULL */
+	*(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL;
+out:
+	*page_ptr = p->page;
+	get_page(p->page);
 	return p;
 }
 
@@ -2131,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				   struct iwl_txq *txq, u8 hdr_len,
 				   struct iwl_cmd_meta *out_meta,
-				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+				   struct iwl_device_tx_cmd *dev_cmd,
+				   u16 tb1_len)
 {
 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
@@ -2141,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 	u16 length, iv_len, amsdu_pad;
 	u8 *start_hdr;
 	struct iwl_tso_hdr_page *hdr_page;
-	struct page **page_ptr;
 	struct tso_t tso;
 
 	/* if the packet is protected, then it must be CCMP or GCMP */
@@ -2164,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
 
 	/* Our device supports 9 segments at most, it will fit in 1 page */
-	hdr_page = get_page_hdr(trans, hdr_room);
+	hdr_page = get_page_hdr(trans, hdr_room, skb);
 	if (!hdr_page)
 		return -ENOMEM;
 
-	get_page(hdr_page->page);
 	start_hdr = hdr_page->pos;
-	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
-	*page_ptr = hdr_page->page;
 
 	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
 	hdr_page->pos += iv_len;
@@ -2254,7 +2245,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
 				       hdr_tb_len, false);
 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
-					hdr_tb_len);
+					hdr_tb_phys, hdr_tb_len);
 		/* add this subframe's headers' length to the tx_cmd */
 		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
 
@@ -2280,7 +2271,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
 					       size, false);
 			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
-						size);
+						tb_phys, size);
 
 			data_left -= size;
 			tso_build_data(skb, &tso, size);
@@ -2313,7 +2304,8 @@
 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 				   struct iwl_txq *txq, u8 hdr_len,
 				   struct iwl_cmd_meta *out_meta,
-				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
+				   struct iwl_device_tx_cmd *dev_cmd,
+				   u16 tb1_len)
 {
 	/* No A-MSDU without CONFIG_INET */
 	WARN_ON(1);
@@ -2323,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
 #endif /* CONFIG_INET */
 
 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
-		      struct iwl_device_cmd *dev_cmd, int txq_id)
+		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct ieee80211_hdr *hdr;
@@ -2380,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 
 	/* don't put the packet on the ring, if there is no room */
 	if (unlikely(iwl_queue_space(trans, txq) < 3)) {
-		struct iwl_device_cmd **dev_cmd_ptr;
+		struct iwl_device_tx_cmd **dev_cmd_ptr;
 
 		dev_cmd_ptr = (void *)((u8 *)skb->cb +
 				       trans_pcie->dev_cmd_offs);
@@ -2504,22 +2496,18 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 	wait_write_ptr = ieee80211_has_morefrags(fc);
 
 	/* start timer if queue currently empty */
-	if (txq->read_ptr == txq->write_ptr) {
-		if (txq->wd_timeout) {
-			/*
-			 * If the TXQ is active, then set the timer, if not,
-			 * set the timer in remainder so that the timer will
-			 * be armed with the right value when the station will
-			 * wake up.
-			 */
-			if (!txq->frozen)
-				mod_timer(&txq->stuck_timer,
-					  jiffies + txq->wd_timeout);
-			else
-				txq->frozen_expiry_remainder = txq->wd_timeout;
-		}
-		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
-		iwl_trans_ref(trans);
+	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
+		/*
+		 * If the TXQ is active, then set the timer, if not,
+		 * set the timer in remainder so that the timer will
+		 * be armed with the right value when the station will
+		 * wake up.
+		 */
+		if (!txq->frozen)
+			mod_timer(&txq->stuck_timer,
+				  jiffies + txq->wd_timeout);
+		else
+			txq->frozen_expiry_remainder = txq->wd_timeout;
 	}
 
 	/* Tell device the write index *just past* this latest filled TFD */
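
A note on the ring-occupancy arithmetic in the @@ -114 hunk of iwl_queue_space() above: because max_tfd_queue_size is a power of two, masking the pointer difference with (size - 1) yields the occupancy even after the unsigned write/read counters wrap. A minimal standalone sketch of just that arithmetic; ring_used() and RING_SIZE are illustrative names, not part of the driver:

/* Standalone sketch of the power-of-two ring-occupancy math. */
#include <assert.h>

#define RING_SIZE 256u			/* must be a power of two */

static unsigned int ring_used(unsigned int write_ptr, unsigned int read_ptr)
{
	/* equivalent to (write_ptr - read_ptr) % RING_SIZE */
	return (write_ptr - read_ptr) & (RING_SIZE - 1);
}

int main(void)
{
	/* normal case: write pointer ahead of read pointer */
	assert(ring_used(10, 4) == 6);
	/* wrapped case: write pointer has wrapped past the ring end */
	assert(ring_used(2, 250) == 8);
	return 0;
}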
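The @@ -624 and @@ -2086 hunks introduce page chaining for TSO header pages: the last sizeof(void *) bytes of each page hold a pointer to the next page (NULL-terminated), and iwl_pcie_free_tso_page() walks that chain. Below is a minimal user-space sketch of the same pattern, assuming plain malloc'd buffers in place of struct page; chain_alloc() and chain_free_all() are hypothetical names, not driver functions:

/* Sketch: store a next-pointer in the tail of each buffer, free by walking. */
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096

static void *chain_alloc(void **head)
{
	void *page = malloc(PAGE_SZ);

	if (!page)
		return NULL;
	/* link the new page in front; old head becomes its next pointer */
	memcpy((char *)page + PAGE_SZ - sizeof(void *), head, sizeof(void *));
	*head = page;
	return page;
}

static void chain_free_all(void **head)
{
	void *next = *head;

	*head = NULL;
	while (next) {
		void *tmp = next;

		/* read the chaining pointer stored at the end of the page */
		memcpy(&next, (char *)tmp + PAGE_SZ - sizeof(void *),
		       sizeof(void *));
		free(tmp);
	}
}

int main(void)
{
	void *head = NULL;

	chain_alloc(&head);
	chain_alloc(&head);
	chain_free_all(&head);
	return 0;
}

Keeping the pointer in the last bytes of the page also leaves that region out of the DMA-mapped area, which, per the in-diff comment, avoids the 32-bit boundary hardware bug.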
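The @@ -1247 hunk keeps the set-bit/poll/clear-on-timeout wake handshake in iwl_pcie_set_cmd_in_flight(), only switching from per-config BIT(csr->...) lookups to the fixed CSR_GP_CNTRL_REG_* masks. A hedged sketch of the pattern with an in-memory stand-in register; reg_set_bits(), reg_clear_bits(), poll_bits() and the flag values are assumptions for illustration, not the driver's helpers:

/* Sketch: request MAC access, poll for clock ready, undo on timeout. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_MAC_ACCESS_REQ	(1u << 3)
#define FLAG_MAC_CLOCK_READY	(1u << 0)

static uint32_t gp_cntrl;	/* stands in for the CSR_GP_CNTRL register */

static void reg_set_bits(uint32_t *reg, uint32_t mask)   { *reg |= mask; }
static void reg_clear_bits(uint32_t *reg, uint32_t mask) { *reg &= ~mask; }

/* poll until (*reg & mask) == bits, for at most 'spins' iterations */
static bool poll_bits(uint32_t *reg, uint32_t bits, uint32_t mask, int spins)
{
	while (spins--) {
		if ((*reg & mask) == bits)
			return true;
		/* real code would delay here; simulate hardware responding */
		*reg |= FLAG_MAC_CLOCK_READY;
	}
	return false;
}

static int wake_nic_for_hcmd(void)
{
	reg_set_bits(&gp_cntrl, FLAG_MAC_ACCESS_REQ);

	if (!poll_bits(&gp_cntrl, FLAG_MAC_CLOCK_READY,
		       FLAG_MAC_CLOCK_READY, 15000)) {
		/* undo the request so the NIC may go back to sleep */
		reg_clear_bits(&gp_cntrl, FLAG_MAC_ACCESS_REQ);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("wake: %d\n", wake_nic_for_hcmd());
	return 0;
}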