| | | |
|---|---|---|
| author | David S. Miller <davem@davemloft.net> | 2014-11-01 14:53:27 -0400 |
| committer | David S. Miller <davem@davemloft.net> | 2014-11-01 14:53:27 -0400 |
| commit | 55b42b5ca2dcf143465968697fe6c6503b05fca1 (patch) | |
| tree | 91878cd53efc44ba67244d4d3897020828c87c01 /drivers/net/xen-netback/interface.c | |
| parent | 10738eeaf4ab3de092586cefcc082e7d43ca0044 (diff) | |
| parent | ec1f1276022e4e3ca40871810217d513e39ff250 (diff) | |
| download | blackbird-op-linux-55b42b5ca2dcf143465968697fe6c6503b05fca1.tar.gz, blackbird-op-linux-55b42b5ca2dcf143465968697fe6c6503b05fca1.zip | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/phy/marvell.c
Simple overlapping changes in drivers/net/phy/marvell.c
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net/xen-netback/interface.c')
| | | |
|---|---|---|
| -rw-r--r-- | drivers/net/xen-netback/interface.c | 74 |

1 file changed, 15 insertions, 59 deletions
```diff
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 7342a6bb5557..a6a32d337bbb 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,9 @@
 #define XENVIF_QUEUE_LENGTH 32
 #define XENVIF_NAPI_WEIGHT  64
 
+/* Number of bytes allowed on the internal guest Rx queue. */
+#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
+
 /* This function is used to set SKBTX_DEV_ZEROCOPY as well as
  * increasing the inflight counter. We need to increase the inflight
  * counter because core driver calls into xenvif_zerocopy_callback
@@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
 	atomic_dec(&queue->inflight_packets);
 }
 
-static inline void xenvif_stop_queue(struct xenvif_queue *queue)
-{
-	struct net_device *dev = queue->vif->dev;
-
-	if (!queue->vif->can_queue)
-		return;
-
-	netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
-}
-
 int xenvif_schedulable(struct xenvif *vif)
 {
 	return netif_running(vif->dev) &&
-		test_bit(VIF_STATUS_CONNECTED, &vif->status);
+		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
+		!vif->disabled;
 }
 
 static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget)
 static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
 {
 	struct xenvif_queue *queue = dev_id;
-	struct netdev_queue *net_queue =
-		netdev_get_tx_queue(queue->vif->dev, queue->id);
 
-	/* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
-	 * the carrier went down and this queue was previously blocked
-	 */
-	if (unlikely(netif_tx_queue_stopped(net_queue) ||
-		     (!netif_carrier_ok(queue->vif->dev) &&
-		      test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
-		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
 	xenvif_kick_thread(queue);
 
 	return IRQ_HANDLED;
@@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
 	netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
 }
 
-/* Callback to wake the queue's thread and turn the carrier off on timeout */
-static void xenvif_rx_stalled(unsigned long data)
-{
-	struct xenvif_queue *queue = (struct xenvif_queue *)data;
-
-	if (xenvif_queue_stopped(queue)) {
-		set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
-		xenvif_kick_thread(queue);
-	}
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct xenvif *vif = netdev_priv(dev);
 	struct xenvif_queue *queue = NULL;
 	unsigned int num_queues = vif->num_queues;
 	u16 index;
-	int min_slots_needed;
+	struct xenvif_rx_cb *cb;
 
 	BUG_ON(skb->dev != dev);
 
@@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	    !xenvif_schedulable(vif))
 		goto drop;
 
-	/* At best we'll need one slot for the header and one for each
-	 * frag.
-	 */
-	min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
-
-	/* If the skb is GSO then we'll also need an extra slot for the
-	 * metadata.
-	 */
-	if (skb_is_gso(skb))
-		min_slots_needed++;
+	cb = XENVIF_RX_CB(skb);
+	cb->expires = jiffies + rx_drain_timeout_jiffies;
 
-	/* If the skb can't possibly fit in the remaining slots
-	 * then turn off the queue to give the ring a chance to
-	 * drain.
-	 */
-	if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
-		queue->rx_stalled.function = xenvif_rx_stalled;
-		queue->rx_stalled.data = (unsigned long)queue;
-		xenvif_stop_queue(queue);
-		mod_timer(&queue->rx_stalled,
-			  jiffies + rx_drain_timeout_jiffies);
-	}
-
-	skb_queue_tail(&queue->rx_queue, skb);
+	xenvif_rx_queue_tail(queue, skb);
 	xenvif_kick_thread(queue);
 
 	return NETDEV_TX_OK;
@@ -465,6 +419,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
 	vif->queues = NULL;
 	vif->num_queues = 0;
 
+	spin_lock_init(&vif->lock);
+
 	dev->netdev_ops = &xenvif_netdev_ops;
 	dev->hw_features = NETIF_F_SG |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -508,6 +464,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 	init_timer(&queue->credit_timeout);
 	queue->credit_window_start = get_jiffies_64();
 
+	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
+
 	skb_queue_head_init(&queue->rx_queue);
 	skb_queue_head_init(&queue->tx_queue);
 
@@ -539,8 +497,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
 		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
 	}
 
-	init_timer(&queue->rx_stalled);
-
 	return 0;
 }
 
@@ -551,7 +507,6 @@ void xenvif_carrier_on(struct xenvif *vif)
 		dev_set_mtu(vif->dev, ETH_DATA_LEN);
 	netdev_update_features(vif->dev);
 	set_bit(VIF_STATUS_CONNECTED, &vif->status);
-	netif_carrier_on(vif->dev);
 	if (netif_running(vif->dev))
 		xenvif_up(vif);
 	rtnl_unlock();
@@ -611,6 +566,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
 		disable_irq(queue->rx_irq);
 	}
 
+	queue->stalled = true;
+
 	task = kthread_create(xenvif_kthread_guest_rx,
 			      (void *)queue, "%s-guest-rx", queue->name);
 	if (IS_ERR(task)) {
@@ -674,7 +631,6 @@ void xenvif_disconnect(struct xenvif *vif)
 		netif_napi_del(&queue->napi);
 
 		if (queue->task) {
-			del_timer_sync(&queue->rx_stalled);
 			kthread_stop(queue->task);
 			queue->task = NULL;
 		}
```
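Taken together, these hunks drop the per-queue `rx_stalled` timer and the `QUEUE_STATUS_RX_PURGE_EVENT` machinery from interface.c: `xenvif_start_xmit()` now stamps each skb with an expiry time (`cb->expires = jiffies + rx_drain_timeout_jiffies`) and hands it to `xenvif_rx_queue_tail()`, the internal guest Rx queue is capped at `XENVIF_RX_QUEUE_BYTES` via `queue->rx_queue_max`, and a new `queue->stalled` flag is initialised just before the guest-Rx kthread is created. The body of `xenvif_rx_queue_tail()` lives in netback.c and is not part of this diff; the sketch below is only a guess at what such a producer-side helper could look like under the new model (the `rx_queue_len` byte counter and the stop-queue threshold are assumptions, not taken from this patch), shown to make the interface.c side easier to read.

```c
/* Illustrative sketch only -- not the in-tree implementation of
 * xenvif_rx_queue_tail().  The idea: queueing an skb accounts its bytes
 * against queue->rx_queue_max (initialised to XENVIF_RX_QUEUE_BYTES above)
 * and stops the netdev TX queue once the limit is exceeded, instead of
 * arming a stall timer.
 */
static void example_rx_queue_tail(struct xenvif_queue *queue,
				  struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

	__skb_queue_tail(&queue->rx_queue, skb);

	/* rx_queue_len (bytes currently queued) is an assumed field. */
	queue->rx_queue_len += skb->len;
	if (queue->rx_queue_len > queue->rx_queue_max)
		netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev,
							queue->id));

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
}
```

Under this model the guest-Rx kthread, rather than a timer callback, is responsible for waking the netdev queue and discarding skbs whose `expires` timestamp has passed, which is why the `rx_stalled` timer setup and the `del_timer_sync()` call disappear from `xenvif_init_queue()` and `xenvif_disconnect()`.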

