Diffstat (limited to 'net/packet/af_packet.c')
-rw-r--r--	net/packet/af_packet.c	133
1 file changed, 108 insertions(+), 25 deletions(-)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 88cfbc189558..279467b74eb7 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -237,6 +237,48 @@ struct packet_skb_cb {
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);
+static int packet_direct_xmit(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ const struct net_device_ops *ops = dev->netdev_ops;
+ netdev_features_t features;
+ struct netdev_queue *txq;
+ u16 queue_map;
+ int ret;
+
+ if (unlikely(!netif_running(dev) ||
+ !netif_carrier_ok(dev))) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ features = netif_skb_features(skb);
+ if (skb_needs_linearize(skb, features) &&
+ __skb_linearize(skb)) {
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
+ }
+
+ queue_map = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, queue_map);
+
+ __netif_tx_lock_bh(txq);
+ if (unlikely(netif_xmit_frozen_or_stopped(txq))) {
+ ret = NETDEV_TX_BUSY;
+ kfree_skb(skb);
+ goto out;
+ }
+
+ ret = ops->ndo_start_xmit(skb, dev);
+ if (likely(dev_xmit_complete(ret)))
+ txq_trans_update(txq);
+ else
+ kfree_skb(skb);
+out:
+ __netif_tx_unlock_bh(txq);
+ return ret;
+}
+
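
For context, this is the transmit path a socket gets once PACKET_QDISC_BYPASS is set (see the setsockopt hunk further down). A minimal userspace sketch of driving it, assuming uapi headers that include this series; "eth0" and the zeroed frame are placeholders and error handling is abbreviated:

	/* Hedged usage sketch: enable qdisc bypass, then transmit one frame.
	 * Needs CAP_NET_RAW; "eth0" and the frame contents are placeholders.
	 */
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/socket.h>
	#include <arpa/inet.h>
	#include <net/if.h>
	#include <linux/if_packet.h>
	#include <linux/if_ether.h>

	int main(void)
	{
		unsigned char frame[ETH_ZLEN] = { 0 }; /* dst/src MAC, ethertype, ... */
		int one = 1;
		int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
		struct sockaddr_ll sll = {
			.sll_family   = AF_PACKET,
			.sll_protocol = htons(ETH_P_ALL),
			.sll_ifindex  = if_nametoindex("eth0"),
		};

		if (fd < 0)
			return 1;
		/* After this, po->xmit == packet_direct_xmit for this socket. */
		if (setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
			       &one, sizeof(one)) < 0)
			perror("setsockopt(PACKET_QDISC_BYPASS)");
		if (bind(fd, (struct sockaddr *)&sll, sizeof(sll)) < 0)
			perror("bind");
		if (send(fd, frame, sizeof(frame), 0) < 0)
			perror("send");
		close(fd);
		return 0;
	}

Note the trade-off visible in the kernel code above: with the qdisc layer skipped there is no queueing or shaping, and a stopped or frozen TX queue surfaces as a freed skb with NETDEV_TX_BUSY rather than a requeue.
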
static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
struct net_device *dev;
@@ -261,6 +303,16 @@ static void packet_cached_dev_reset(struct packet_sock *po)
RCU_INIT_POINTER(po->cached_dev, NULL);
}
+static bool packet_use_direct_xmit(const struct packet_sock *po)
+{
+ return po->xmit == packet_direct_xmit;
+}
+
+static u16 packet_pick_tx_queue(struct net_device *dev)
+{
+ return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
+}
+
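
The queue pick is deliberately simple: the executing CPU id folded modulo the device's real TX queue count, so on a NIC exposing four TX queues, CPU 6 lands on queue 2. A plain-C illustration of the same arithmetic (not kernel API; since raw_smp_processor_id() is used, the task may migrate right after the read, so the mapping is best-effort):

	/* Illustration only: the cpu -> txq fold used by packet_pick_tx_queue(). */
	static unsigned short pick_txq(unsigned int cpu, unsigned int real_num_tx_queues)
	{
		return (unsigned short)(cpu % real_num_tx_queues);
	}
	/* pick_txq(6, 4) == 2, pick_txq(3, 4) == 3 */
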
/* register_prot_hook must be invoked with the po->bind_lock held,
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
@@ -458,7 +510,8 @@ static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
{
struct tpacket_kbdq_core *pkc;
- pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+ pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
+ GET_PBDQC_FROM_RB(&po->rx_ring);
spin_lock_bh(&rb_queue->lock);
pkc->delete_blk_timer = 1;
@@ -484,7 +537,8 @@ static void prb_setup_retire_blk_timer(struct packet_sock *po, int tx_ring)
if (tx_ring)
BUG();
- pkc = tx_ring ? &po->tx_ring.prb_bdqc : &po->rx_ring.prb_bdqc;
+ pkc = tx_ring ? GET_PBDQC_FROM_RB(&po->tx_ring) :
+ GET_PBDQC_FROM_RB(&po->rx_ring);
prb_init_blk_timer(po, pkc, prb_retire_rx_blk_timer_expired);
}
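
These timer hunks (and the init_prb_bdqc and retire-timer hunks that follow) replace the open-coded &rb->prb_bdqc with the accessor macro already used elsewhere in this file. For reference, the macro reads roughly as below in af_packet.c; quoted from memory, so treat it as an assumption and check your tree:

	#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
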
@@ -542,7 +596,7 @@ static void init_prb_bdqc(struct packet_sock *po,
struct pgv *pg_vec,
union tpacket_req_u *req_u, int tx_ring)
{
- struct tpacket_kbdq_core *p1 = &rb->prb_bdqc;
+ struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
struct tpacket_block_desc *pbd;
memset(p1, 0x0, sizeof(*p1));
@@ -606,7 +660,7 @@ static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
static void prb_retire_rx_blk_timer_expired(unsigned long data)
{
struct packet_sock *po = (struct packet_sock *)data;
- struct tpacket_kbdq_core *pkc = &po->rx_ring.prb_bdqc;
+ struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
unsigned int frozen;
struct tpacket_block_desc *pbd;
@@ -909,7 +963,7 @@ static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
- ppd->hv1.tp_rxhash = skb_get_rxhash(pkc->skb);
+ ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}
static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
@@ -923,9 +977,11 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
{
if (vlan_tx_tag_present(pkc->skb)) {
ppd->hv1.tp_vlan_tci = vlan_tx_tag_get(pkc->skb);
- ppd->tp_status = TP_STATUS_VLAN_VALID;
+ ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
+ ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
ppd->hv1.tp_vlan_tci = 0;
+ ppd->hv1.tp_vlan_tpid = 0;
ppd->tp_status = TP_STATUS_AVAILABLE;
}
}
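
On the userspace side of a TPACKET_V3 RX ring this means a consumer can now recover the TPID (e.g. 0x8100 vs 0x88a8) as well as the TCI, gated by the new status bit. A hedged reader sketch, assuming the patched uapi headers; the surrounding block/frame walker is omitted:

	/* Sketch: consuming VLAN info from one TPACKET_V3 frame descriptor. */
	#include <stdio.h>
	#include <linux/if_packet.h>

	static void print_vlan(const struct tpacket3_hdr *ppd)
	{
		if (!(ppd->tp_status & TP_STATUS_VLAN_VALID))
			return;
		if (ppd->tp_status & TP_STATUS_VLAN_TPID_VALID)
			printf("vlan: tpid=0x%04x tci=%u\n",
			       ppd->hv1.tp_vlan_tpid, ppd->hv1.tp_vlan_tci);
		else	/* pre-TPID kernels: only the TCI is reported */
			printf("vlan: tci=%u\n", ppd->hv1.tp_vlan_tci);
	}
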
@@ -933,6 +989,7 @@ static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
struct tpacket3_hdr *ppd)
{
+ ppd->hv1.tp_padding = 0;
prb_fill_vlan_info(pkc, ppd);
if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
@@ -1241,7 +1298,7 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
if (!skb)
return 0;
}
- skb_get_rxhash(skb);
+ skb_get_hash(skb);
idx = fanout_demux_hash(f, skb, num);
break;
case PACKET_FANOUT_LB:
@@ -1299,9 +1356,9 @@ static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
spin_unlock(&f->lock);
}
-static bool match_fanout_group(struct packet_type *ptype, struct sock * sk)
+static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
- if (ptype->af_packet_priv == (void*)((struct packet_sock *)sk)->fanout)
+ if (ptype->af_packet_priv == (void *)((struct packet_sock *)sk)->fanout)
return true;
return false;
@@ -1758,6 +1815,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
struct timespec ts;
__u32 ts_status;
+ /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
+ * We may add members to them until current aligned size without forcing
+ * userspace to call getsockopt(..., PACKET_HDRLEN, ...).
+ */
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
+ BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
+
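
The invariant pinned down by the BUILD_BUG_ONs: TPACKET_ALIGNMENT is 16, so tpacket2_hdr currently pads out to 32 bytes and tpacket3_hdr to 48, and new members (like tp_vlan_tpid above) may consume that padding without moving the frame layout that existing binaries computed via PACKET_HDRLEN. The same check can be mirrored in userspace, e.g. with C11 static assertions:

	/* Userspace mirror of the invariant; values taken from the uapi headers. */
	#include <linux/if_packet.h>

	_Static_assert(TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) == 32,
		       "tpacket2_hdr outgrew its aligned slot");
	_Static_assert(TPACKET_ALIGN(sizeof(struct tpacket3_hdr)) == 48,
		       "tpacket3_hdr outgrew its aligned slot");
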
if (skb->pkt_type == PACKET_LOOPBACK)
goto drop;
@@ -1864,11 +1928,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h2->tp_nsec = ts.tv_nsec;
if (vlan_tx_tag_present(skb)) {
h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
- status |= TP_STATUS_VLAN_VALID;
+ h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
+ status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
h.h2->tp_vlan_tci = 0;
+ h.h2->tp_vlan_tpid = 0;
}
- h.h2->tp_padding = 0;
+ memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
hdrlen = sizeof(*h.h2);
break;
case TPACKET_V3:
@@ -1882,6 +1948,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
h.h3->tp_net = netoff;
h.h3->tp_sec = ts.tv_sec;
h.h3->tp_nsec = ts.tv_nsec;
+ memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
hdrlen = sizeof(*h.h3);
break;
default:
@@ -1992,9 +2059,10 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb_reserve(skb, hlen);
skb_reset_network_header(skb);
- skb_probe_transport_header(skb, 0);
- if (po->tp_tx_has_off) {
+ if (!packet_use_direct_xmit(po))
+ skb_probe_transport_header(skb, 0);
+ if (unlikely(po->tp_tx_has_off)) {
int off_min, off_max, off;
off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
off_max = po->tx_ring.frame_size - tp_len;
@@ -2164,12 +2232,13 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
}
+ skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
skb->destructor = tpacket_destruct_skb;
__packet_set_status(po, ph, TP_STATUS_SENDING);
atomic_inc(&po->tx_ring.pending);
status = TP_STATUS_SEND_REQUEST;
- err = dev_queue_xmit(skb);
+ err = po->xmit(skb);
if (unlikely(err > 0)) {
err = net_xmit_errno(err);
if (err && __packet_get_status(po, ph) ==
@@ -2228,8 +2297,7 @@ static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
return skb;
}
-static int packet_snd(struct socket *sock,
- struct msghdr *msg, size_t len)
+static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
{
struct sock *sk = sock->sk;
struct sockaddr_ll *saddr = (struct sockaddr_ll *)msg->msg_name;
@@ -2374,6 +2442,7 @@ static int packet_snd(struct socket *sock,
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
+ skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
if (po->has_vnet_hdr) {
if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
@@ -2394,16 +2463,12 @@ static int packet_snd(struct socket *sock,
len += vnet_hdr_len;
}
- skb_probe_transport_header(skb, reserve);
-
+ if (!packet_use_direct_xmit(po))
+ skb_probe_transport_header(skb, reserve);
if (unlikely(extra_len == 4))
skb->no_fcs = 1;
- /*
- * Now send it
- */
-
- err = dev_queue_xmit(skb);
+ err = po->xmit(skb);
if (err > 0 && (err = net_xmit_errno(err)) != 0)
goto out_unlock;
@@ -2425,6 +2490,7 @@ static int packet_sendmsg(struct kiocb *iocb, struct socket *sock,
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
+
if (po->tx_ring.pg_vec)
return tpacket_snd(po, msg);
else
@@ -2639,6 +2705,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
po = pkt_sk(sk);
sk->sk_family = PF_PACKET;
po->num = proto;
+ po->xmit = dev_queue_xmit;
packet_cached_dev_reset(po);
@@ -2813,11 +2880,12 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
aux.tp_net = skb_network_offset(skb);
if (vlan_tx_tag_present(skb)) {
aux.tp_vlan_tci = vlan_tx_tag_get(skb);
- aux.tp_status |= TP_STATUS_VLAN_VALID;
+ aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
+ aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
} else {
aux.tp_vlan_tci = 0;
+ aux.tp_vlan_tpid = 0;
}
- aux.tp_padding = 0;
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
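
A plain recvmsg() consumer sees the same information through the PACKET_AUXDATA control message (the option itself must have been enabled with setsockopt(fd, SOL_PACKET, PACKET_AUXDATA, ...)). A hedged sketch of the cmsg walk, assuming the patched uapi headers where tp_vlan_tpid replaces the old tp_padding member:

	/* Sketch: extract VLAN data from the PACKET_AUXDATA cmsg after recvmsg(). */
	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/if_packet.h>

	static void scan_aux(struct msghdr *msg)
	{
		struct cmsghdr *cmsg;

		for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
			const struct tpacket_auxdata *aux;

			if (cmsg->cmsg_level != SOL_PACKET ||
			    cmsg->cmsg_type != PACKET_AUXDATA)
				continue;
			aux = (const struct tpacket_auxdata *)CMSG_DATA(cmsg);
			if ((aux->tp_status & TP_STATUS_VLAN_VALID) &&
			    (aux->tp_status & TP_STATUS_VLAN_TPID_VALID))
				printf("vlan: tpid=0x%04x tci=%u\n",
				       aux->tp_vlan_tpid, aux->tp_vlan_tci);
		}
	}
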
@@ -3218,6 +3286,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
po->tp_tx_has_off = !!val;
return 0;
}
+ case PACKET_QDISC_BYPASS:
+ {
+ int val;
+
+ if (optlen != sizeof(val))
+ return -EINVAL;
+ if (copy_from_user(&val, optval, sizeof(val)))
+ return -EFAULT;
+
+ po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
+ return 0;
+ }
default:
return -ENOPROTOOPT;
}
@@ -3310,6 +3390,9 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
case PACKET_TX_HAS_OFF:
val = po->tp_tx_has_off;
break;
+ case PACKET_QDISC_BYPASS:
+ val = packet_use_direct_xmit(po);
+ break;
default:
return -ENOPROTOOPT;
}
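
The getsockopt() side is symmetric: querying PACKET_QDISC_BYPASS returns 1 exactly while po->xmit points at packet_direct_xmit. A minimal read-back helper, assuming an already-open packet socket fd:

	/* Sketch: report whether qdisc bypass is active on packet socket 'fd'. */
	#include <sys/socket.h>
	#include <linux/if_packet.h>

	static int qdisc_bypass_enabled(int fd)
	{
		int val = 0;
		socklen_t len = sizeof(val);

		if (getsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS, &val, &len) < 0)
			return -1;	/* e.g. older kernel: errno == ENOPROTOOPT */
		return val;
	}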