author    Haiyang Zhang <haiyangz@microsoft.com>    2012-03-27 13:20:45 +0000
committer David S. Miller <davem@davemloft.net>     2012-04-03 17:47:15 -0400
commit    33be96e47cc27f2f1a753a0707b02a73df8c8d46 (patch)
tree      2b0361f4d39a14e2ddd0d23e66656e6174b3cb71 /drivers/net/hyperv
parent    ede7193d4fdca98178240500d8684dbc139ca26f (diff)
net/hyperv: Add flow control based on hi/low watermark
In the existing code, we only stop the queue when the ring buffer is full,
so the current packet has to be dropped or retried from the upper layer.
This patch stops the tx queue when the available ring buffer space falls
below the low watermark, so the ring still has a small amount of space
left for the current packet. This reduces the overhead of retries on
sending.

Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Reviewed-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
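For illustration, the following is a minimal sketch of the hi/low watermark
policy described above, written as self-contained C outside the kernel. The
struct ring, ring_avail_percent(), should_stop_queue() and should_wake_queue()
names are hypothetical stand-ins for the Hyper-V ring-buffer bookkeeping and
the netif_stop_queue()/netif_wake_queue() calls used by the patch; the actual
driver logic is in the diff below.

/*
 * Hypothetical sketch of the watermark policy; not driver code.
 * The send path stops the queue once free ring space drops below the
 * low watermark, and the completion path wakes it once free space rises
 * above the high watermark (or nothing is outstanding any more).
 */
#include <stdbool.h>

#define AVAIL_PERCENT_HIWATER 20	/* wake the queue above this */
#define AVAIL_PERCENT_LOWATER 10	/* stop the queue below this */

struct ring {
	unsigned int avail_bytes;	/* free space left in the ring */
	unsigned int data_size;		/* total usable ring size */
};

/* Percentage of the ring still available for writing, 0..100. */
static unsigned int ring_avail_percent(const struct ring *r)
{
	return r->avail_bytes * 100 / r->data_size;
}

/* After queuing a packet: should the tx queue be stopped? */
static bool should_stop_queue(const struct ring *r)
{
	return ring_avail_percent(r) < AVAIL_PERCENT_LOWATER;
}

/* After a send completion: should the tx queue be woken again? */
static bool should_wake_queue(const struct ring *r, int outstanding_sends)
{
	return ring_avail_percent(r) > AVAIL_PERCENT_HIWATER ||
	       outstanding_sends < 1;
}

Using two separate thresholds gives hysteresis: the queue is not woken the
moment free space crosses back above the stop level, which avoids rapid
stop/wake cycling while the ring hovers near the low watermark.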
Diffstat (limited to 'drivers/net/hyperv')
-rw-r--r--  drivers/net/hyperv/netvsc.c      | 41
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c  |  6
2 files changed, 42 insertions(+), 5 deletions(-)
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index d025c83cd12a..8b919471472f 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -428,6 +428,24 @@ int netvsc_device_remove(struct hv_device *device)
return 0;
}
+
+#define RING_AVAIL_PERCENT_HIWATER 20
+#define RING_AVAIL_PERCENT_LOWATER 10
+
+/*
+ * Get the percentage of available bytes to write in the ring.
+ * The return value is in range from 0 to 100.
+ */
+static inline u32 hv_ringbuf_avail_percent(
+ struct hv_ring_buffer_info *ring_info)
+{
+ u32 avail_read, avail_write;
+
+ hv_get_ringbuffer_availbytes(ring_info, &avail_read, &avail_write);
+
+ return avail_write * 100 / ring_info->ring_datasize;
+}
+
static void netvsc_send_completion(struct hv_device *device,
struct vmpacket_descriptor *packet)
{
@@ -455,6 +473,8 @@ static void netvsc_send_completion(struct hv_device *device,
complete(&net_device->channel_init_wait);
} else if (nvsp_packet->hdr.msg_type ==
NVSP_MSG1_TYPE_SEND_RNDIS_PKT_COMPLETE) {
+ int num_outstanding_sends;
+
/* Get the send context */
nvsc_packet = (struct hv_netvsc_packet *)(unsigned long)
packet->trans_id;
@@ -463,10 +483,14 @@ static void netvsc_send_completion(struct hv_device *device,
nvsc_packet->completion.send.send_completion(
nvsc_packet->completion.send.send_completion_ctx);
- atomic_dec(&net_device->num_outstanding_sends);
+ num_outstanding_sends =
+ atomic_dec_return(&net_device->num_outstanding_sends);
- if (netif_queue_stopped(ndev) && !net_device->start_remove)
- netif_wake_queue(ndev);
+ if (netif_queue_stopped(ndev) && !net_device->start_remove &&
+ (hv_ringbuf_avail_percent(&device->channel->outbound)
+ > RING_AVAIL_PERCENT_HIWATER ||
+ num_outstanding_sends < 1))
+ netif_wake_queue(ndev);
} else {
netdev_err(ndev, "Unknown send completion packet type- "
"%d received!!\n", nvsp_packet->hdr.msg_type);
@@ -519,10 +543,19 @@ int netvsc_send(struct hv_device *device,
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
+ if (hv_ringbuf_avail_percent(&device->channel->outbound) <
+ RING_AVAIL_PERCENT_LOWATER) {
+ netif_stop_queue(ndev);
+ if (atomic_read(&net_device->
+ num_outstanding_sends) < 1)
+ netif_wake_queue(ndev);
+ }
} else if (ret == -EAGAIN) {
netif_stop_queue(ndev);
- if (atomic_read(&net_device->num_outstanding_sends) < 1)
+ if (atomic_read(&net_device->num_outstanding_sends) < 1) {
netif_wake_queue(ndev);
+ ret = -ENOSPC;
+ }
} else {
netdev_err(ndev, "Unable to send packet %p ret %d\n",
packet, ret);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index dd294783b5c5..a0cc12786be4 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -224,9 +224,13 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
net->stats.tx_packets++;
} else {
kfree(packet);
+ if (ret != -EAGAIN) {
+ dev_kfree_skb_any(skb);
+ net->stats.tx_dropped++;
+ }
}
- return ret ? NETDEV_TX_BUSY : NETDEV_TX_OK;
+ return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
/*