diff options
Diffstat (limited to 'drivers/net/ethernet/intel/iavf/iavf_txrx.c')
| -rw-r--r-- | drivers/net/ethernet/intel/iavf/iavf_txrx.c | 35 | 
1 file changed, 25 insertions, 10 deletions
diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 06d1509d57f7..1cde1601bc32 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -190,7 +190,7 @@ void iavf_detect_recover_hung(struct iavf_vsi *vsi)  static bool iavf_clean_tx_irq(struct iavf_vsi *vsi,  			      struct iavf_ring *tx_ring, int napi_budget)  { -	u16 i = tx_ring->next_to_clean; +	int i = tx_ring->next_to_clean;  	struct iavf_tx_buffer *tx_buf;  	struct iavf_tx_desc *tx_desc;  	unsigned int total_bytes = 0, total_packets = 0; @@ -379,19 +379,19 @@ static inline unsigned int iavf_itr_divisor(struct iavf_q_vector *q_vector)  	unsigned int divisor;  	switch (q_vector->adapter->link_speed) { -	case I40E_LINK_SPEED_40GB: +	case IAVF_LINK_SPEED_40GB:  		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 1024;  		break; -	case I40E_LINK_SPEED_25GB: -	case I40E_LINK_SPEED_20GB: +	case IAVF_LINK_SPEED_25GB: +	case IAVF_LINK_SPEED_20GB:  		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 512;  		break;  	default: -	case I40E_LINK_SPEED_10GB: +	case IAVF_LINK_SPEED_10GB:  		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 256;  		break; -	case I40E_LINK_SPEED_1GB: -	case I40E_LINK_SPEED_100MB: +	case IAVF_LINK_SPEED_1GB: +	case IAVF_LINK_SPEED_100MB:  		divisor = IAVF_ITR_ADAPTIVE_MIN_INC * 32;  		break;  	} @@ -1236,6 +1236,9 @@ static void iavf_add_rx_frag(struct iavf_ring *rx_ring,  	unsigned int truesize = SKB_DATA_ALIGN(size + iavf_rx_offset(rx_ring));  #endif +	if (!size) +		return; +  	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,  			rx_buffer->page_offset, size, truesize); @@ -1260,6 +1263,9 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring,  {  	struct iavf_rx_buffer *rx_buffer; +	if (!size) +		return NULL; +  	rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean];  	prefetchw(rx_buffer->page); @@ -1299,6 +1305,8 @@ static struct sk_buff *iavf_construct_skb(struct iavf_ring 
*rx_ring,  	unsigned int headlen;  	struct sk_buff *skb; +	if (!rx_buffer) +		return NULL;  	/* prefetch first cache line of first page */  	prefetch(va);  #if L1_CACHE_BYTES < 128 @@ -1363,6 +1371,8 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,  #endif  	struct sk_buff *skb; +	if (!rx_buffer) +		return NULL;  	/* prefetch first cache line of first page */  	prefetch(va);  #if L1_CACHE_BYTES < 128 @@ -1398,6 +1408,9 @@ static struct sk_buff *iavf_build_skb(struct iavf_ring *rx_ring,  static void iavf_put_rx_buffer(struct iavf_ring *rx_ring,  			       struct iavf_rx_buffer *rx_buffer)  { +	if (!rx_buffer) +		return; +  	if (iavf_can_reuse_rx_page(rx_buffer)) {  		/* hand second half of page back to the ring */  		iavf_reuse_rx_page(rx_ring, rx_buffer); @@ -1496,11 +1509,12 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)  		 * verified the descriptor has been written back.  		 */  		dma_rmb(); +#define IAVF_RXD_DD BIT(IAVF_RX_DESC_STATUS_DD_SHIFT) +		if (!iavf_test_staterr(rx_desc, IAVF_RXD_DD)) +			break;  		size = (qword & IAVF_RXD_QW1_LENGTH_PBUF_MASK) >>  		       IAVF_RXD_QW1_LENGTH_PBUF_SHIFT; -		if (!size) -			break;  		iavf_trace(clean_rx_irq, rx_ring, rx_desc, skb);  		rx_buffer = iavf_get_rx_buffer(rx_ring, size); @@ -1516,7 +1530,8 @@ static int iavf_clean_rx_irq(struct iavf_ring *rx_ring, int budget)  		/* exit if we failed to retrieve a buffer */  		if (!skb) {  			rx_ring->rx_stats.alloc_buff_failed++; -			rx_buffer->pagecnt_bias++; +			if (rx_buffer) +				rx_buffer->pagecnt_bias++;  			break;  		}  | 

