author | Zhu Yi <yi.zhu@intel.com> | 2009-10-09 17:19:45 +0800
---|---|---
committer | John W. Linville <linville@tuxdriver.com> | 2009-10-27 16:48:06 -0400
commit | 2f301227a1ede57504694e1f64839839f5737cac | (patch)
tree | c148ca6c3409f5f8fed4455fba3a78fe31469135 | /drivers/net/wireless/iwlwifi/iwl-rx.c
parent | ae751bab9f55c3152ebf713c89a4fb6f439c2575 | (diff)
iwlwifi: use paged Rx
This switches the iwlwifi driver from linear skbs to paged skbs for its Rx
buffers, relieving some Rx buffer allocation pressure on the memory
subsystem. iwlwifi currently requests 8K bytes (4K for 3945) per Rx buffer.
Because of the skb_shared_info that trails the data in skb->data,
alloc_skb() must fall back to the next-order allocation, i.e. 16K bytes.
This is suboptimal and more likely to fail when the system is under memory
pressure. Switching to paged Rx lets us allocate the RXB directly with
alloc_pages(), so only an order-1 allocation is required.
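
To make the arithmetic concrete, here is a small standalone sketch (userspace C, illustrative only: SHARED_INFO_SIZE merely approximates sizeof(struct skb_shared_info), and the extra 256 bytes mirror the alignment slack the old alloc_skb() path requested):

```c
#include <stdio.h>

#define PAGE_SIZE        4096
#define SHARED_INFO_SIZE 320	/* rough stand-in for sizeof(struct skb_shared_info) */

/* Smallest page order whose span covers the requested bytes. */
static int order_for(size_t bytes)
{
	int order = 0;
	size_t span = PAGE_SIZE;

	while (span < bytes) {
		span <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	size_t rx_buf_size = 8192;

	/* Linear skb: data + 256 alignment slack + trailing skb_shared_info. */
	printf("alloc_skb   -> order %d\n",
	       order_for(rx_buf_size + 256 + SHARED_INFO_SIZE));	/* order 2, 16K */

	/* Paged Rx: exactly the buffer, no trailing metadata. */
	printf("alloc_pages -> order %d\n",
	       order_for(rx_buf_size));					/* order 1, 8K */
	return 0;
}
```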
It also shrinks the region that spin_lock (with IRQs disabled) protects in
the Rx tasklet: a tasklet is guaranteed to run on only one CPU at a time,
so the newly unprotected code can safely be preempted by the IRQ handler.
This saves us from spawning another workqueue just to keep
skb_linearize/__pskb_pull_tail happy, since neither can be called in hard
IRQ context.
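
The resulting pattern, as a minimal kernel-style sketch (struct rx_queue, struct rx_buf, and rx_handle() are simplified stand-ins for illustration, not the driver's real types):

```c
#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical, pared-down stand-ins for the driver's Rx state. */
struct rx_buf {
	struct list_head list;
	struct page *page;
};

struct rx_queue {
	spinlock_t lock;		/* shared with the IRQ handler */
	struct list_head filled;	/* buffers the hardware completed */
};

/* Deliver one buffer; may call skb_linearize()/__pskb_pull_tail(). */
static void rx_handle(struct rx_buf *rxb) { }

static void rx_tasklet_body(struct rx_queue *rxq)
{
	unsigned long flags;

	for (;;) {
		struct rx_buf *rxb;

		/* Hold the IRQ-disabling lock only while detaching a buffer. */
		spin_lock_irqsave(&rxq->lock, flags);
		if (list_empty(&rxq->filled)) {
			spin_unlock_irqrestore(&rxq->lock, flags);
			break;
		}
		rxb = list_first_entry(&rxq->filled, struct rx_buf, list);
		list_del(&rxb->list);
		spin_unlock_irqrestore(&rxq->lock, flags);

		/*
		 * Safe without the lock: a tasklet runs on at most one CPU
		 * at a time, and the IRQ handler that can preempt us only
		 * touches the queue under rxq->lock.  Crucially, IRQs are
		 * enabled here, so the linearize/pull helpers are allowed.
		 */
		rx_handle(rxb);
	}
}
```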
Finally, mac80211 doesn't support paged Rx yet, so before handing a frame
up we linearize the skb for all management frames and for data frames that
require software decryption or defragmentation. For all other frames, we
__pskb_pull_tail() 64 bytes into the skb's linear area, which is enough for
mac80211 to handle them properly (a condensed sketch follows).
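
In code form, the decision reduces to the following (iwl_prep_rx_skb() is a hypothetical wrapper name used here for illustration; in the actual patch this logic sits inline in iwl_pass_packet_to_mac80211(), as the diff below shows, with IWL_LINK_HDR_MAX being the driver's 64-byte constant):

```c
#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>

/* Returns 0 once the paged skb is in a shape mac80211 can digest. */
static int iwl_prep_rx_skb(struct sk_buff *skb,
			   struct ieee80211_hdr *hdr, u16 len)
{
	/* Management frames, and data frames that still need software
	 * decryption or defragmentation, must be fully linear. */
	if (ieee80211_is_mgmt(hdr->frame_control) ||
	    ieee80211_has_protected(hdr->frame_control) ||
	    ieee80211_has_morefrags(hdr->frame_control) ||
	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
		return skb_linearize(skb);

	/* Everything else only needs the first 64 bytes pulled into the
	 * linear area so mac80211 can parse the headers. */
	return __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
		0 : -ENOMEM;
}
```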
Signed-off-by: Zhu Yi <yi.zhu@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-rx.c')
-rw-r--r-- | drivers/net/wireless/iwlwifi/iwl-rx.c | 122
1 file changed, 76 insertions(+), 46 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 7ad327ef9cb5..0a407f79de01 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -200,7 +200,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
 		list_del(element);
 
 		/* Point to Rx buffer via next RBD in circular buffer */
-		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr);
+		rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
 		rxq->queue[rxq->write] = rxb;
 		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
 		rxq->free_count--;
@@ -239,7 +239,7 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 	struct iwl_rx_queue *rxq = &priv->rxq;
 	struct list_head *element;
 	struct iwl_rx_mem_buffer *rxb;
-	struct sk_buff *skb;
+	struct page *page;
 	unsigned long flags;
 
 	while (1) {
@@ -252,29 +252,34 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		if (rxq->free_count > RX_LOW_WATERMARK)
 			priority |= __GFP_NOWARN;
 
-		/* Alloc a new receive buffer */
-		skb = alloc_skb(priv->hw_params.rx_buf_size + 256,
-				priority);
-		if (!skb) {
+		if (priv->hw_params.rx_page_order > 0)
+			priority |= __GFP_COMP;
+
+		/* Alloc a new receive buffer */
+		page = alloc_pages(priority, priv->hw_params.rx_page_order);
+		if (!page) {
 			if (net_ratelimit())
-				IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
+				IWL_DEBUG_INFO(priv, "alloc_pages failed, "
+					       "order: %d\n",
+					       priv->hw_params.rx_page_order);
+
 			if ((rxq->free_count <= RX_LOW_WATERMARK) &&
 			    net_ratelimit())
-				IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n",
+				IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
					 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
					 rxq->free_count);
 			/* We don't reschedule replenish work here -- we will
 			 * call the restock method and if it still needs
 			 * more buffers it will schedule replenish */
-			break;
+			return;
 		}
 
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		if (list_empty(&rxq->rx_used)) {
 			spin_unlock_irqrestore(&rxq->lock, flags);
-			dev_kfree_skb_any(skb);
+			__free_pages(page, priv->hw_params.rx_page_order);
 			return;
 		}
 		element = rxq->rx_used.next;
@@ -283,24 +288,21 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
 		spin_unlock_irqrestore(&rxq->lock, flags);
 
-		rxb->skb = skb;
-		/* Get physical address of RB/SKB */
-		rxb->real_dma_addr = pci_map_single(
-					priv->pci_dev,
-					rxb->skb->data,
-					priv->hw_params.rx_buf_size + 256,
-					PCI_DMA_FROMDEVICE);
+		rxb->page = page;
+		/* Get physical address of the RB */
+		rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
 		/* dma address must be no more than 36 bits */
-		BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36));
+		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
-		rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256);
-		skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
+		BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
 
 		spin_lock_irqsave(&rxq->lock, flags);
 
 		list_add_tail(&rxb->list, &rxq->rx_free);
 		rxq->free_count++;
-		priv->alloc_rxb_skb++;
+		priv->alloc_rxb_page++;
 
 		spin_unlock_irqrestore(&rxq->lock, flags);
 	}
@@ -336,12 +338,14 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 {
 	int i;
 
 	for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			dev_kfree_skb(rxq->pool[i].skb);
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
+			priv->alloc_rxb_page--;
 		}
 	}
 
@@ -405,14 +409,14 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
 	for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
 		/* In the reset function, these buffers may have been allocated
 		 * to an SKB, so we need to unmap and free potential storage */
-		if (rxq->pool[i].skb != NULL) {
-			pci_unmap_single(priv->pci_dev,
-					 rxq->pool[i].real_dma_addr,
-					 priv->hw_params.rx_buf_size + 256,
-					 PCI_DMA_FROMDEVICE);
-			priv->alloc_rxb_skb--;
-			dev_kfree_skb(rxq->pool[i].skb);
-			rxq->pool[i].skb = NULL;
+		if (rxq->pool[i].page != NULL) {
+			pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
+				PAGE_SIZE << priv->hw_params.rx_page_order,
+				PCI_DMA_FROMDEVICE);
+			priv->alloc_rxb_page--;
+			__free_pages(rxq->pool[i].page,
+				     priv->hw_params.rx_page_order);
+			rxq->pool[i].page = NULL;
 		}
 		list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
 	}
@@ -491,7 +495,7 @@
 void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
 				struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_missed_beacon_notif *missed_beacon;
 
 	missed_beacon = &pkt->u.missed_beacon;
@@ -592,7 +596,7 @@
 void iwl_rx_statistics(struct iwl_priv *priv,
 			      struct iwl_rx_mem_buffer *rxb)
 {
 	int change;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 
 	IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
 		     (int)sizeof(priv->statistics),
@@ -919,6 +923,9 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
 					struct iwl_rx_mem_buffer *rxb,
 					struct ieee80211_rx_status *stats)
 {
+	struct sk_buff *skb;
+	int ret = 0;
+
 	/* We only process data packets if the interface is open */
 	if (unlikely(!priv->is_open)) {
 		IWL_DEBUG_DROP_LIMIT(priv,
@@ -931,15 +938,38 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
 	    iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
 		return;
 
-	/* Resize SKB from mac header to end of packet */
-	skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data);
-	skb_put(rxb->skb, len);
+	skb = alloc_skb(IWL_LINK_HDR_MAX, GFP_ATOMIC);
+	if (!skb) {
+		IWL_ERR(priv, "alloc_skb failed\n");
+		return;
+	}
+
+	skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
+
+	/* mac80211 currently doesn't support paged SKB. Convert it to
+	 * linear SKB for management frame and data frame requires
+	 * software decryption or software defragementation. */
+	if (ieee80211_is_mgmt(hdr->frame_control) ||
+	    ieee80211_has_protected(hdr->frame_control) ||
+	    ieee80211_has_morefrags(hdr->frame_control) ||
+	    le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
+		ret = skb_linearize(skb);
+	else
+		ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
+			0 : -ENOMEM;
+
+	if (ret) {
+		kfree_skb(skb);
+		goto out;
+	}
 
 	iwl_update_stats(priv, false, hdr->frame_control, len);
-	memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats));
-	ieee80211_rx_irqsafe(priv->hw, rxb->skb);
-	priv->alloc_rxb_skb--;
-	rxb->skb = NULL;
+
+	memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
+	ieee80211_rx(priv->hw, skb);
+ out:
+	priv->alloc_rxb_page--;
+	rxb->page = NULL;
 }
 
 /* This is necessary only for a number of statistics, see the caller. */
@@ -967,7 +997,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
 {
 	struct ieee80211_hdr *header;
 	struct ieee80211_rx_status rx_status;
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_phy_res *phy_res;
 	__le32 rx_pkt_status;
 	struct iwl4965_rx_mpdu_res_start *amsdu;
@@ -1128,7 +1158,7 @@ EXPORT_SYMBOL(iwl_rx_reply_rx);
 void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
 			    struct iwl_rx_mem_buffer *rxb)
 {
-	struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data;
+	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	priv->last_phy_res[0] = 1;
 	memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
 	       sizeof(struct iwl_rx_phy_res));