Diffstat (limited to 'drivers/net/virtio_net.c')
-rw-r--r--  drivers/net/virtio_net.c | 129
1 file changed, 99 insertions(+), 30 deletions(-)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index f34794a76c4d..2d55e2af7cb6 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -30,8 +30,11 @@
#include <linux/cpu.h>
#include <linux/average.h>
#include <linux/filter.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
#include <net/route.h>
#include <net/xdp.h>
+#include <net/net_failover.h>
static int napi_weight = NAPI_POLL_WEIGHT;
module_param(napi_weight, int, 0444);
@@ -210,6 +213,9 @@ struct virtnet_info {
u32 speed;
unsigned long guest_offloads;
+
+ /* failover when STANDBY feature enabled */
+ struct failover *failover;
};
struct padded_vnet_hdr {
@@ -419,23 +425,13 @@ static void virtnet_xdp_flush(struct net_device *dev)
virtqueue_kick(sq->vq);
}
-static int __virtnet_xdp_xmit(struct virtnet_info *vi,
- struct xdp_frame *xdpf)
+static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
+ struct send_queue *sq,
+ struct xdp_frame *xdpf)
{
struct virtio_net_hdr_mrg_rxbuf *hdr;
- struct xdp_frame *xdpf_sent;
- struct send_queue *sq;
- unsigned int len;
- unsigned int qp;
int err;
- qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
- sq = &vi->sq[qp];
-
- /* Free up any pending old buffers before queueing new ones. */
- while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
- xdp_return_frame(xdpf_sent);
-
/* virtqueue wants to use the data area in front of the packet */
if (unlikely(xdpf->metasize > 0))
return -EOPNOTSUPP;
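The hunk above narrows the old __virtnet_xdp_xmit() to a pure single-frame enqueue: picking the per-CPU send queue and reaping completed buffers move out to the callers, so the batched path added below pays those costs once per batch instead of once per frame. The resulting split, with signatures as they appear in this diff and bodies elided:

	/* enqueue exactly one frame on a queue the caller picked */
	static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
					  struct send_queue *sq,
					  struct xdp_frame *xdpf);

	/* XDP_TX path: pick this CPU's queue, reap, send one frame */
	static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
					 struct xdp_frame *xdpf);

	/* ndo_xdp_xmit: pick queue, reap once, then enqueue n frames */
	static int virtnet_xdp_xmit(struct net_device *dev,
				    int n, struct xdp_frame **frames);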
@@ -459,11 +455,40 @@ static int __virtnet_xdp_xmit(struct virtnet_info *vi,
return 0;
}
-static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf)
+static int __virtnet_xdp_tx_xmit(struct virtnet_info *vi,
+ struct xdp_frame *xdpf)
+{
+ struct xdp_frame *xdpf_sent;
+ struct send_queue *sq;
+ unsigned int len;
+ unsigned int qp;
+
+ qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+ sq = &vi->sq[qp];
+
+ /* Free up any pending old buffers before queueing new ones. */
+ while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
+ xdp_return_frame(xdpf_sent);
+
+ return __virtnet_xdp_xmit_one(vi, sq, xdpf);
+}
+
+static int virtnet_xdp_xmit(struct net_device *dev,
+ int n, struct xdp_frame **frames)
{
struct virtnet_info *vi = netdev_priv(dev);
struct receive_queue *rq = vi->rq;
+ struct xdp_frame *xdpf_sent;
struct bpf_prog *xdp_prog;
+ struct send_queue *sq;
+ unsigned int len;
+ unsigned int qp;
+ int drops = 0;
+ int err;
+ int i;
+
+ qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
+ sq = &vi->sq[qp];
/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
 * indicates XDP resources have been successfully allocated.
 */
@@ -472,7 +497,20 @@ static int virtnet_xdp_xmit(struct net_device *dev, struct xdp_frame *xdpf)
xdp_prog = rcu_dereference(rq->xdp_prog);
if (!xdp_prog)
return -ENXIO;
- return __virtnet_xdp_xmit(vi, xdpf);
+ /* Free up any pending old buffers before queueing new ones. */
+ while ((xdpf_sent = virtqueue_get_buf(sq->vq, &len)) != NULL)
+ xdp_return_frame(xdpf_sent);
+
+ for (i = 0; i < n; i++) {
+ struct xdp_frame *xdpf = frames[i];
+
+ err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
+ if (err) {
+ xdp_return_frame_rx_napi(xdpf);
+ drops++;
+ }
+ }
+ return n - drops;
}
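virtnet_xdp_xmit() now accepts an array of frames and returns how many it queued; frames it could not queue have already been handed back via xdp_return_frame_rx_napi(). A minimal, hypothetical caller sketch (only the ndo signature and the -ENXIO case come from this diff; the helper name and surrounding policy are assumptions):

	/* Hypothetical caller of the batched ndo_xdp_xmit (illustrative only). */
	static int example_xmit_batch(struct net_device *dev,
				      struct xdp_frame **frames, int n)
	{
		int sent;

		if (!dev->netdev_ops->ndo_xdp_xmit)
			return -EOPNOTSUPP;

		sent = dev->netdev_ops->ndo_xdp_xmit(dev, n, frames);
		if (sent < 0)
			return sent;	/* e.g. -ENXIO: no XDP program on dev */

		/* n - sent frames failed; the driver freed them already,
		 * so the caller must not return those frames again. */
		return sent;
	}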
static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
@@ -616,7 +654,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
xdpf = convert_to_xdp_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
- err = __virtnet_xdp_xmit(vi, xdpf);
+ err = __virtnet_xdp_tx_xmit(vi, xdpf);
if (unlikely(err)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
goto err_xdp;
@@ -713,6 +751,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
void *data;
u32 act;
+ /* Transient failure which in theory could occur if
+ * in-flight packets from before XDP was enabled reach
+ * the receive path after XDP is loaded.
+ */
+ if (unlikely(hdr->hdr.gso_type))
+ goto err_xdp;
+
/* This happens when the rx buffer size is underestimated
 * or headroom is not enough because the buffer was
 * refilled before XDP was set. This should only happen
 * for the first several packets, so we don't care much
 * about its performance.
 */
@@ -733,14 +778,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdp_page = page;
}
- /* Transient failure which in theory could occur if
- * in-flight packets from before XDP was enabled reach
- * the receive path after XDP is loaded. In practice I
- * was not able to create this condition.
- */
- if (unlikely(hdr->hdr.gso_type))
- goto err_xdp;
-
/* Allow consuming headroom but reserve enough space to push
* the descriptor on if we get an XDP_TX return code.
*/
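Read together, the two hunks above only relocate the GSO check so that it runs before the xdp_linearize_page() copy instead of after it. That matters because the err_xdp path releases only the original head page: with the old ordering, a GSO frame that had already been linearized leaked the copied page, and a doomed frame still paid for the copy. Condensed flow after the change (condition paraphrased, arguments elided):

	if (unlikely(hdr->hdr.gso_type))	/* cheap reject before any copy */
		goto err_xdp;

	if (rx_buffer_or_headroom_too_small) {	/* condition paraphrased */
		xdp_page = xdp_linearize_page(...);	/* copy only frames we keep */
		...
	}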
@@ -779,7 +816,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
xdpf = convert_to_xdp_frame(&xdp);
if (unlikely(!xdpf))
goto err_xdp;
- err = __virtnet_xdp_xmit(vi, xdpf);
+ err = __virtnet_xdp_tx_xmit(vi, xdpf);
if (unlikely(err)) {
trace_xdp_exception(vi->dev, xdp_prog, act);
if (unlikely(xdp_page != page))
@@ -788,7 +825,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
*xdp_xmit = true;
if (unlikely(xdp_page != page))
- goto err_xdp;
+ put_page(page);
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
@@ -800,7 +837,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
}
*xdp_xmit = true;
if (unlikely(xdp_page != page))
- goto err_xdp;
+ put_page(page);
rcu_read_unlock();
goto xdp_xmit;
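Both put_page() substitutions above fix the success path for a linearized frame: ownership of xdp_page has passed to the transmit or redirect machinery, so the only cleanup left is dropping the reference on the original head page whose data was copied away. The old goto err_xdp instead ran the receive error path, accounting a delivered frame as an rx drop. Annotated shape of the fixed exit (comments are interpretation, not patch text):

	*xdp_xmit = true;
	if (unlikely(xdp_page != page))
		put_page(page);		/* data now lives in xdp_page */
	rcu_read_unlock();
	goto xdp_xmit;			/* success: skip rx error accounting */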
default:
@@ -888,7 +925,7 @@ err_xdp:
rcu_read_unlock();
err_skb:
put_page(page);
- while (--num_buf) {
+ while (num_buf-- > 1) {
buf = virtqueue_get_buf(rq->vq, &len);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
	 dev->name, num_buf);
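The loop-condition change above fixes an underflow in this error path: a successful xdp_linearize_page() consumes the frame's remaining buffers and leaves the count at zero, and the old pre-decrement test would then wrap the u16 counter and start popping descriptors that belong to other frames. A worked comparison (illustrative values):

	/* num_buf == 3: both forms drain the 2 remaining buffers.
	 * num_buf == 0 (frame was linearized):
	 *   while (--num_buf)     -> wraps to 65535, drains foreign buffers
	 *   while (num_buf-- > 1) -> 0 > 1 is false, the loop never runs
	 */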
@@ -1523,6 +1560,9 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
struct sockaddr *addr;
struct scatterlist sg;
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
+ return -EOPNOTSUPP;
+
addr = kmemdup(p, sizeof(*addr), GFP_KERNEL);
if (!addr)
return -ENOMEM;
@@ -2306,6 +2346,22 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
+static int virtnet_get_phys_port_name(struct net_device *dev, char *buf,
+ size_t len)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int ret;
+
+ if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
+ return -EOPNOTSUPP;
+
+ ret = snprintf(buf, len, "sby");
+ if (ret >= len)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
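phys_port_name is exported through the generic netdev sysfs ABI, so the fixed "sby" string gives userspace a stable way to recognize the standby interface and leave its configuration to the failover master. A hypothetical userspace check (the sysfs path is the standard attribute; the helper itself is illustrative):

	/* Hypothetical: does ifname identify itself as the virtio standby? */
	#include <stdio.h>
	#include <string.h>

	static int is_standby_port(const char *ifname)
	{
		char path[256], name[16];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/class/net/%s/phys_port_name", ifname);
		f = fopen(path, "r");
		if (!f)
			return 0;	/* attribute absent or unsupported */
		if (!fgets(name, sizeof(name), f)) {
			fclose(f);
			return 0;
		}
		fclose(f);
		return strncmp(name, "sby", 3) == 0;
	}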
static const struct net_device_ops virtnet_netdev = {
.ndo_open = virtnet_open,
.ndo_stop = virtnet_close,
@@ -2323,6 +2379,7 @@ static const struct net_device_ops virtnet_netdev = {
.ndo_xdp_xmit = virtnet_xdp_xmit,
.ndo_xdp_flush = virtnet_xdp_flush,
.ndo_features_check = passthru_features_check,
+ .ndo_get_phys_port_name = virtnet_get_phys_port_name,
};
static void virtnet_config_changed_work(struct work_struct *work)
@@ -2876,10 +2933,18 @@ static int virtnet_probe(struct virtio_device *vdev)
virtnet_init_settings(dev);
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
+ vi->failover = net_failover_create(vi->dev);
+ if (IS_ERR(vi->failover)) {
+ err = PTR_ERR(vi->failover);
+ goto free_vqs;
+ }
+ }
+
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
- goto free_vqs;
+ goto free_failover;
}
virtio_device_ready(vdev);
@@ -2916,6 +2981,8 @@ free_unregister_netdev:
vi->vdev->config->reset(vdev);
unregister_netdev(dev);
+free_failover:
+ net_failover_destroy(vi->failover);
free_vqs:
cancel_delayed_work_sync(&vi->refill);
free_receive_page_frags(vi);
@@ -2950,6 +3017,8 @@ static void virtnet_remove(struct virtio_device *vdev)
unregister_netdev(vi->dev);
+ net_failover_destroy(vi->failover);
+
remove_vq_common(vi);
free_netdev(vi->dev);
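Probe and remove now bracket the lifetime of the failover master that net_failover_create() registers on top of the virtio device; a VF with the same MAC address can then be enslaved as the primary datapath, which is consistent with virtnet_set_mac_address() above refusing MAC changes under STANDBY. A condensed sketch of the pairing this diff establishes (unwinding abbreviated; net_failover_destroy() tolerating a NULL pointer is inferred from the unconditional calls above):

	/* probe: create the master before registering the standby netdev */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_STANDBY)) {
		vi->failover = net_failover_create(vi->dev);
		if (IS_ERR(vi->failover))
			goto free_vqs;			/* no master to undo yet */
	}
	if (register_netdev(dev))
		net_failover_destroy(vi->failover);	/* unwind on failure */

	/* remove: tear down in reverse order */
	unregister_netdev(vi->dev);
	net_failover_destroy(vi->failover);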
@@ -2999,7 +3068,7 @@ static struct virtio_device_id id_table[] = {
VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, \
VIRTIO_NET_F_CTRL_MAC_ADDR, \
VIRTIO_NET_F_MTU, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
- VIRTIO_NET_F_SPEED_DUPLEX
+ VIRTIO_NET_F_SPEED_DUPLEX, VIRTIO_NET_F_STANDBY
static unsigned int features[] = {
VIRTNET_FEATURES,