author     Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>	2010-05-13 17:33:21 +0000
committer  David S. Miller <davem@davemloft.net>	2010-05-13 21:07:33 -0700
commit     e8171aaad7ec335b8cbd71f56eb08b545f0c404f
tree       efd76846ee2cf0a266386b97c23130dc55d9c11d /drivers/net
parent     e433ea1fb03c10debf101019668b83abed041c24
ixgbe: Use bool flag to see if the packet unmapping is delayed in HWRSC
We can't use the magic value zero as the "bad" value to check whether
IXGBE_RSC_CB(skb)->dma is valid. Zero is an invalid DMA address only on
architectures such as x86/arm/m68k/alpha; on sparc, PowerPC and other
architectures the invalid value is ~0. As per Benjamin Herrenschmidt's
feedback, use a bool flag to decide whether the packet unmapping is
delayed in hardware RSC until EOP is reached.
Signed-off-by: Mallikarjuna R Chilakala <mallikarjuna.chilakala@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
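To make the portability point concrete, below is a minimal, driver-independent sketch of the pattern the patch adopts. The struct and helper names (rsc_state, defer_unmap, finish_unmap) are illustrative stand-ins, not part of ixgbe; dma_unmap_single is shown only as a comment since this builds in user space.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;   /* stand-in for the kernel type */

/*
 * Per-packet state kept while hardware RSC coalesces frames.
 * On sparc/powerpc a valid DMA mapping can legitimately be 0 and the
 * "invalid" value is ~0, so the dma field alone cannot say whether an
 * unmap is still pending; the explicit flag carries that information.
 */
struct rsc_state {
	dma_addr_t dma;      /* mapping to release once EOP is seen */
	bool delay_unmap;    /* true while the unmap is deferred    */
};

/* First descriptor of a coalesced packet: remember the mapping. */
static void defer_unmap(struct rsc_state *st, dma_addr_t mapping)
{
	st->dma = mapping;
	st->delay_unmap = true;          /* explicit flag, no magic value */
}

/* End-of-packet descriptor: release the deferred mapping, if any. */
static void finish_unmap(struct rsc_state *st)
{
	if (st->delay_unmap) {           /* instead of: if (st->dma) */
		/* dma_unmap_single(dev, st->dma, len, DMA_FROM_DEVICE); */
		st->dma = 0;
		st->delay_unmap = false;
	}
}

int main(void)
{
	struct rsc_state st = { .dma = 0, .delay_unmap = false };

	defer_unmap(&st, 0);             /* 0 can be a valid mapping here */
	finish_unmap(&st);               /* still released exactly once   */
	printf("pending=%d\n", st.delay_unmap);
	return 0;
}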
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c | 17 +++++++++++------
1 file changed, 11 insertions(+), 6 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 15032c79e003..3fb9f23c7502 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -1160,6 +1160,7 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
 
 struct ixgbe_rsc_cb {
 	dma_addr_t dma;
+	bool delay_unmap;
 };
 
 #define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
@@ -1215,7 +1216,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (rx_buffer_info->dma) {
 			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
 			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
-			    (!(skb->prev)))
+			    (!(skb->prev))) {
 				/*
 				 * When HWRSC is enabled, delay unmapping
 				 * of the first packet. It carries the
@@ -1223,12 +1224,14 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 				 * access the header after the writeback.
 				 * Only unmap it when EOP is reached
 				 */
+				IXGBE_RSC_CB(skb)->delay_unmap = true;
 				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
-			else
+			} else {
 				dma_unmap_single(&pdev->dev,
-						 rx_buffer_info->dma,
+				                 rx_buffer_info->dma,
 						 rx_ring->rx_buf_len,
-						 DMA_FROM_DEVICE);
+				                 DMA_FROM_DEVICE);
+			}
 			rx_buffer_info->dma = 0;
 			skb_put(skb, len);
 		}
@@ -1276,12 +1279,13 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		if (skb->prev)
 			skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
 		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-			if (IXGBE_RSC_CB(skb)->dma) {
+			if (IXGBE_RSC_CB(skb)->delay_unmap) {
 				dma_unmap_single(&pdev->dev,
 						 IXGBE_RSC_CB(skb)->dma,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
 				IXGBE_RSC_CB(skb)->dma = 0;
+				IXGBE_RSC_CB(skb)->delay_unmap = false;
 			}
 			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
 				rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
@@ -3505,12 +3509,13 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 			rx_buffer_info->skb = NULL;
 			do {
 				struct sk_buff *this = skb;
-				if (IXGBE_RSC_CB(this)->dma) {
+				if (IXGBE_RSC_CB(this)->delay_unmap) {
 					dma_unmap_single(&pdev->dev,
 							 IXGBE_RSC_CB(this)->dma,
 							 rx_ring->rx_buf_len,
 							 DMA_FROM_DEVICE);
 					IXGBE_RSC_CB(this)->dma = 0;
+					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
 				skb = skb->prev;
 				dev_kfree_skb(this);
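The IXGBE_RSC_CB() macro seen in the hunks above overlays this per-packet state on the socket buffer's 48-byte control block (skb->cb), so no extra allocation is needed. Below is a user-space sketch of that cast pattern; fake_skb, rsc_cb and RSC_CB are made-up stand-ins for the kernel's struct sk_buff, struct ixgbe_rsc_cb and IXGBE_RSC_CB.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

typedef uint64_t dma_addr_t;

/* Stand-in for struct sk_buff: the kernel reserves a 48-byte
 * scratch area (cb) that the current owner of the buffer may use. */
struct fake_skb {
	unsigned char cb[48];
};

struct rsc_cb {
	dma_addr_t dma;
	bool delay_unmap;
};

/* Same idea as IXGBE_RSC_CB(skb): reinterpret the scratch area. */
#define RSC_CB(skb) ((struct rsc_cb *)(skb)->cb)

int main(void)
{
	struct fake_skb skb;
	memset(&skb, 0, sizeof(skb));

	/* The overlay must fit inside the control block. */
	assert(sizeof(struct rsc_cb) <= sizeof(skb.cb));

	RSC_CB(&skb)->dma = 0xdeadbeef;
	RSC_CB(&skb)->delay_unmap = true;

	assert(RSC_CB(&skb)->delay_unmap);
	return 0;
}

The assert mirrors the real constraint: whatever a driver overlays on cb has to fit within that scratch area.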