Diffstat (limited to 'drivers/net/ethernet/cisco/enic/enic_main.c')
 drivers/net/ethernet/cisco/enic/enic_main.c | 179 ++++++++++++-------
 1 file changed, 116 insertions(+), 63 deletions(-)
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index e356afa44e7d..9cbe038a388e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -45,6 +45,7 @@
 #ifdef CONFIG_NET_RX_BUSY_POLL
 #include <net/busy_poll.h>
 #endif
+#include <linux/crash_dump.h>
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -88,7 +89,7 @@ MODULE_DEVICE_TABLE(pci, enic_id_table);
  *  coalescing timer values
  *  {rx_rate in Mbps, mapping percentage of the range}
  */
-struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+static struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
 	{4000,  0},
 	{4400, 10},
 	{5060, 20},
@@ -105,7 +106,7 @@ struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
 /* This table helps the driver to pick different ranges for rx coalescing
  * timer depending on the link speed.
  */
-struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+static struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
 	{0,  0}, /* 0  - 4  Gbps */
 	{0,  3}, /* 4  - 10 Gbps */
 	{3,  6}, /* 10 - 40 Gbps */
@@ -351,80 +352,94 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
-static inline void enic_queue_wq_skb_cont(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	unsigned int len_left, int loopback)
+static int enic_queue_wq_skb_cont(struct enic *enic, struct vnic_wq *wq,
+				  struct sk_buff *skb, unsigned int len_left,
+				  int loopback)
 {
 	const skb_frag_t *frag;
+	dma_addr_t dma_addr;
 
 	/* Queue additional data fragments */
 	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
 		len_left -= skb_frag_size(frag);
-		enic_queue_wq_desc_cont(wq, skb,
-			skb_frag_dma_map(&enic->pdev->dev,
-					 frag, 0, skb_frag_size(frag),
-					 DMA_TO_DEVICE),
-			skb_frag_size(frag),
-			(len_left == 0),	/* EOP? */
-			loopback);
+		dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag, 0,
+					    skb_frag_size(frag),
+					    DMA_TO_DEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_cont(wq, skb, dma_addr, skb_frag_size(frag),
+					(len_left == 0),	/* EOP? */
+					loopback);
 	}
+
+	return 0;
 }
 
-static inline void enic_queue_wq_skb_vlan(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_vlan(struct enic *enic, struct vnic_wq *wq,
+				  struct sk_buff *skb, int vlan_tag_insert,
+				  unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;
 
 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc(wq, skb, dma_addr, head_len, vlan_tag_insert,
+			   vlan_tag, eop, loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }
 
-static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
+				     struct sk_buff *skb, int vlan_tag_insert,
+				     unsigned int vlan_tag, int loopback)
 {
 	unsigned int head_len = skb_headlen(skb);
 	unsigned int len_left = skb->len - head_len;
 	unsigned int hdr_len = skb_checksum_start_offset(skb);
 	unsigned int csum_offset = hdr_len + skb->csum_offset;
 	int eop = (len_left == 0);
+	dma_addr_t dma_addr;
+	int err = 0;
+
+	dma_addr = pci_map_single(enic->pdev, skb->data, head_len,
+				  PCI_DMA_TODEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr)))
+		return -ENOMEM;
 
 	/* Queue the main skb fragment. The fragments are no larger
 	 * than max MTU(9000)+ETH_HDR_LEN(14) bytes, which is less
 	 * than WQ_ENET_MAX_DESC_LEN length. So only one descriptor
 	 * per fragment is queued.
 	 */
-	enic_queue_wq_desc_csum_l4(wq, skb,
-		pci_map_single(enic->pdev, skb->data,
-			head_len, PCI_DMA_TODEVICE),
-		head_len,
-		csum_offset,
-		hdr_len,
-		vlan_tag_insert, vlan_tag,
-		eop, loopback);
+	enic_queue_wq_desc_csum_l4(wq, skb, dma_addr, head_len, csum_offset,
+				   hdr_len, vlan_tag_insert, vlan_tag, eop,
+				   loopback);
 
 	if (!eop)
-		enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+		err = enic_queue_wq_skb_cont(enic, wq, skb, len_left, loopback);
+
+	return err;
 }
 
-static inline void enic_queue_wq_skb_tso(struct enic *enic,
-	struct vnic_wq *wq, struct sk_buff *skb, unsigned int mss,
-	int vlan_tag_insert, unsigned int vlan_tag, int loopback)
+static int enic_queue_wq_skb_tso(struct enic *enic, struct vnic_wq *wq,
+				 struct sk_buff *skb, unsigned int mss,
+				 int vlan_tag_insert, unsigned int vlan_tag,
+				 int loopback)
 {
 	unsigned int frag_len_left = skb_headlen(skb);
 	unsigned int len_left = skb->len - frag_len_left;
@@ -454,20 +469,19 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 	 */
 	while (frag_len_left) {
 		len = min(frag_len_left, (unsigned int)WQ_ENET_MAX_DESC_LEN);
-		dma_addr = pci_map_single(enic->pdev, skb->data + offset,
-				len, PCI_DMA_TODEVICE);
-		enic_queue_wq_desc_tso(wq, skb,
-			dma_addr,
-			len,
-			mss, hdr_len,
-			vlan_tag_insert, vlan_tag,
-			eop && (len == frag_len_left), loopback);
+		dma_addr = pci_map_single(enic->pdev, skb->data + offset, len,
+					  PCI_DMA_TODEVICE);
+		if (unlikely(enic_dma_map_check(enic, dma_addr)))
+			return -ENOMEM;
+		enic_queue_wq_desc_tso(wq, skb, dma_addr, len, mss, hdr_len,
+				       vlan_tag_insert, vlan_tag,
+				       eop && (len == frag_len_left), loopback);
 		frag_len_left -= len;
 		offset += len;
 	}
 
 	if (eop)
-		return;
+		return 0;
 
 	/* Queue WQ_ENET_MAX_DESC_LEN length descriptors
 	 * for additional data fragments
@@ -483,16 +497,18 @@ static inline void enic_queue_wq_skb_tso(struct enic *enic,
 			dma_addr = skb_frag_dma_map(&enic->pdev->dev, frag,
 						    offset, len,
 						    DMA_TO_DEVICE);
-			enic_queue_wq_desc_cont(wq, skb,
-				dma_addr,
-				len,
-				(len_left == 0) &&
-				(len == frag_len_left),		/* EOP? */
-				loopback);
+			if (unlikely(enic_dma_map_check(enic, dma_addr)))
+				return -ENOMEM;
+			enic_queue_wq_desc_cont(wq, skb, dma_addr, len,
						(len_left == 0) &&
						 (len == frag_len_left),/*EOP*/
						loopback);
 			frag_len_left -= len;
 			offset += len;
 		}
 	}
+
+	return 0;
 }
 
 static inline void enic_queue_wq_skb(struct enic *enic,
@@ -502,25 +518,42 @@ static inline void enic_queue_wq_skb(struct enic *enic,
 	unsigned int vlan_tag = 0;
 	int vlan_tag_insert = 0;
 	int loopback = 0;
+	int err;
 
-	if (vlan_tx_tag_present(skb)) {
+	if (skb_vlan_tag_present(skb)) {
 		/* VLAN tag from trunking driver */
 		vlan_tag_insert = 1;
-		vlan_tag = vlan_tx_tag_get(skb);
+		vlan_tag = skb_vlan_tag_get(skb);
 	} else if (enic->loop_enable) {
 		vlan_tag = enic->loop_tag;
 		loopback = 1;
 	}
 
 	if (mss)
-		enic_queue_wq_skb_tso(enic, wq, skb, mss,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_tso(enic, wq, skb, mss,
+					    vlan_tag_insert, vlan_tag,
+					    loopback);
 	else if	(skb->ip_summed == CHECKSUM_PARTIAL)
-		enic_queue_wq_skb_csum_l4(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_csum_l4(enic, wq, skb, vlan_tag_insert,
+						vlan_tag, loopback);
 	else
-		enic_queue_wq_skb_vlan(enic, wq, skb,
-			vlan_tag_insert, vlan_tag, loopback);
+		err = enic_queue_wq_skb_vlan(enic, wq, skb, vlan_tag_insert,
+					     vlan_tag, loopback);
+
+	if (unlikely(err)) {
+		struct vnic_wq_buf *buf;
+
+		buf = wq->to_use->prev;
+		/* while not EOP of previous pkt && queue not empty.
+		 * For all non EOP bufs, os_buf is NULL.
+		 */
+		while (!buf->os_buf && (buf->next != wq->to_clean)) {
+			enic_free_wq_buf(wq, buf);
+			wq->ring.desc_avail++;
+			buf = buf->prev;
+		}
+		wq->to_use = buf->next;
+		dev_kfree_skb(skb);
+	}
 }
 
 /* netif_tx_lock held, process context with BHs disabled, or BH */
@@ -950,8 +983,12 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
 	if (!skb)
 		return -ENOMEM;
 
-	dma_addr = pci_map_single(enic->pdev, skb->data,
-		len, PCI_DMA_FROMDEVICE);
+	dma_addr = pci_map_single(enic->pdev, skb->data, len,
+				  PCI_DMA_FROMDEVICE);
+	if (unlikely(enic_dma_map_check(enic, dma_addr))) {
+		dev_kfree_skb(skb);
+		return -ENOMEM;
+	}
 
 	enic_queue_rq_desc(rq, skb, os_buf_index,
 		dma_addr, len);
@@ -1266,7 +1303,7 @@ static void enic_set_rx_cpu_rmap(struct enic *enic)
 #endif /* CONFIG_RFS_ACCEL */
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
-int enic_busy_poll(struct napi_struct *napi)
+static int enic_busy_poll(struct napi_struct *napi)
 {
 	struct net_device *netdev = napi->dev;
 	struct enic *enic = netdev_priv(netdev);
@@ -2231,6 +2268,18 @@ static void enic_dev_deinit(struct enic *enic)
 	enic_clear_intr_mode(enic);
 }
 
+static void enic_kdump_kernel_config(struct enic *enic)
+{
+	if (is_kdump_kernel()) {
+		dev_info(enic_get_dev(enic), "Running from within kdump kernel. Using minimal resources\n");
+		enic->rq_count = 1;
+		enic->wq_count = 1;
+		enic->config.rq_desc_count = ENIC_MIN_RQ_DESCS;
+		enic->config.wq_desc_count = ENIC_MIN_WQ_DESCS;
+		enic->config.mtu = min_t(u16, 1500, enic->config.mtu);
+	}
+}
+
 static int enic_dev_init(struct enic *enic)
 {
 	struct device *dev = enic_get_dev(enic);
@@ -2260,6 +2309,10 @@ static int enic_dev_init(struct enic *enic)
 
 	enic_get_res_counts(enic);
 
+	/* modify resource count if we are in kdump_kernel
+	 */
+	enic_kdump_kernel_config(enic);
+
 	/* Set interrupt mode based on resource counts and system
 	 * capabilities
 	 */
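Note on the new error handling: every DMA mapping in the transmit and receive paths is now funneled through enic_dma_map_check(), which this diff does not define (the helper is added to enic.h by the same change). A minimal sketch of what such a helper plausibly looks like, assuming it only wraps pci_dma_mapping_error() on the device's PCI handle; the ratelimited warning is illustrative, not taken from this diff:

/* Return nonzero if the DMA mapping failed, so callers can unwind
 * any descriptors they already posted and drop the packet.
 */
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
	if (unlikely(pci_dma_mapping_error(enic->pdev, dma_addr))) {
		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
				     enic->netdev->name);
		return -ENOMEM;
	}
	return 0;
}

Callers treat any nonzero return as -ENOMEM. On failure, enic_queue_wq_skb() walks backwards from wq->to_use, freeing the buffers of the partially queued packet; per the comment in the diff, only the EOP buffer of a packet has a non-NULL os_buf, so the walk stops at the previous packet's boundary (or at wq->to_clean when the ring is empty) before the skb is dropped.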
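The kdump hook relies on is_kdump_kernel() from <linux/crash_dump.h> (hence the new include), which reports whether this boot is the crash capture kernel, where reserved memory is scarce. The trimming pattern generalizes to any multiqueue driver; a standalone sketch under that assumption, with hypothetical foo_* names and ring sizes:

#include <linux/crash_dump.h>

struct foo_adapter {
	unsigned int num_rxqs;
	unsigned int num_txqs;
	unsigned int rx_ring_size;
	unsigned int tx_ring_size;
};

/* Hypothetical probe-time helper: fall back to one queue pair and
 * short descriptor rings when running in a kdump capture kernel.
 */
static void foo_trim_for_kdump(struct foo_adapter *adapter)
{
	if (!is_kdump_kernel())
		return;			/* normal boot: keep full resources */

	adapter->num_rxqs = 1;		/* one queue pair suffices to dump */
	adapter->num_txqs = 1;
	adapter->rx_ring_size = 64;	/* minimal rings save memory */
	adapter->tx_ring_size = 64;
}

As with enic_kdump_kernel_config() above, such a call belongs right after the resource counts are read and before the interrupt mode is chosen, so every later allocation sees the reduced counts.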

