Diffstat (limited to 'net')
-rw-r--r--  net/caif/cfserl.c                |  6
-rw-r--r--  net/core/skbuff.c                | 42
-rw-r--r--  net/ipv4/netfilter/ip_tables.c   |  2
-rw-r--r--  net/ipv4/tcp_input.c             |  4
-rw-r--r--  net/ipv4/udp.c                   |  4
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c  |  2
-rw-r--r--  net/ipv6/route.c                 |  2
-rw-r--r--  net/mac80211/chan.c              |  2
-rw-r--r--  net/netfilter/x_tables.c         | 17
-rw-r--r--  net/phonet/pep.c                 |  6
-rw-r--r--  net/rds/ib_cm.c                  |  1
-rw-r--r--  net/rds/iw_cm.c                  |  1
12 files changed, 56 insertions, 33 deletions
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
index cb4325a3dc83..965c5baace40 100644
--- a/net/caif/cfserl.c
+++ b/net/caif/cfserl.c
@@ -59,16 +59,18 @@ static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
 	u8 stx = CFSERL_STX;
 	int ret;
 	u16 expectlen = 0;
+
 	caif_assert(newpkt != NULL);
 	spin_lock(&layr->sync);
 
 	if (layr->incomplete_frm != NULL) {
-
 		layr->incomplete_frm =
 		    cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
 		pkt = layr->incomplete_frm;
-		if (pkt == NULL)
+		if (pkt == NULL) {
+			spin_unlock(&layr->sync);
 			return -ENOMEM;
+		}
 	} else {
 		pkt = newpkt;
 	}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index f8abf68e3988..9f07e749d7b1 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -482,22 +482,22 @@ EXPORT_SYMBOL(consume_skb);
  *	reference count dropping and cleans up the skbuff as if it
  *	just came from __alloc_skb().
  */
-int skb_recycle_check(struct sk_buff *skb, int skb_size)
+bool skb_recycle_check(struct sk_buff *skb, int skb_size)
 {
 	struct skb_shared_info *shinfo;
 
 	if (irqs_disabled())
-		return 0;
+		return false;
 
 	if (skb_is_nonlinear(skb) || skb->fclone != SKB_FCLONE_UNAVAILABLE)
-		return 0;
+		return false;
 
 	skb_size = SKB_DATA_ALIGN(skb_size + NET_SKB_PAD);
 	if (skb_end_pointer(skb) - skb->head < skb_size)
-		return 0;
+		return false;
 
 	if (skb_shared(skb) || skb_cloned(skb))
-		return 0;
+		return false;
 
 	skb_release_head_state(skb);
 
@@ -509,7 +509,7 @@ int skb_recycle_check(struct sk_buff *skb, int skb_size)
 	skb->data = skb->head + NET_SKB_PAD;
 	skb_reset_tail_pointer(skb);
 
-	return 1;
+	return true;
 }
 EXPORT_SYMBOL(skb_recycle_check);
 
@@ -2965,6 +2965,34 @@ int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
 }
 EXPORT_SYMBOL_GPL(skb_cow_data);
 
+static void sock_rmem_free(struct sk_buff *skb)
+{
+	struct sock *sk = skb->sk;
+
+	atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+}
+
+/*
+ * Note: We dont mem charge error packets (no sk_forward_alloc changes)
+ */
+int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
+{
+	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+	    (unsigned)sk->sk_rcvbuf)
+		return -ENOMEM;
+
+	skb_orphan(skb);
+	skb->sk = sk;
+	skb->destructor = sock_rmem_free;
+	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
+
+	skb_queue_tail(&sk->sk_error_queue, skb);
+	if (!sock_flag(sk, SOCK_DEAD))
+		sk->sk_data_ready(sk, skb->len);
+	return 0;
+}
+EXPORT_SYMBOL(sock_queue_err_skb);
+
 void skb_tstamp_tx(struct sk_buff *orig_skb,
 		struct skb_shared_hwtstamps *hwtstamps)
 {
@@ -2996,7 +3024,9 @@ void skb_tstamp_tx(struct sk_buff *orig_skb,
 	memset(serr, 0, sizeof(*serr));
 	serr->ee.ee_errno = ENOMSG;
 	serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
+
 	err = sock_queue_err_skb(sk, skb);
+
 	if (err)
 		kfree_skb(skb);
 }
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 63958f3394a5..4b6c5ca610fc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -336,7 +336,7 @@ ipt_do_table(struct sk_buff *skb,
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];
 	jumpstack  = (struct ipt_entry **)private->jumpstack[cpu];
-	stackptr   = &private->stackptr[cpu];
+	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e6dafcb1071..548d575e6cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2639,7 +2639,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 	if (sk->sk_family == AF_INET) {
 		printk(KERN_DEBUG "Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
 		       msg,
-		       &inet->daddr, ntohs(inet->dport),
+		       &inet->inet_daddr, ntohs(inet->inet_dport),
 		       tp->snd_cwnd, tcp_left_out(tp),
 		       tp->snd_ssthresh, tp->prior_ssthresh,
 		       tp->packets_out);
@@ -2649,7 +2649,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 		struct ipv6_pinfo *np = inet6_sk(sk);
 		printk(KERN_DEBUG "Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
 		       msg,
-		       &np->daddr, ntohs(inet->dport),
+		       &np->daddr, ntohs(inet->inet_dport),
 		       tp->snd_cwnd, tcp_left_out(tp),
 		       tp->snd_ssthresh, tp->prior_ssthresh,
 		       tp->packets_out);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 58585748bdac..eec4ff456e33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -633,9 +633,9 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 	if (!inet->recverr) {
 		if (!harderr || sk->sk_state != TCP_ESTABLISHED)
 			goto out;
-	} else {
+	} else
 		ip_icmp_error(sk, skb, err, uh->dest, info, (u8 *)(uh+1));
-	}
+
 	sk->sk_err = err;
 	sk->sk_error_report(sk);
 out:
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 6f517bd83692..9d2d68f0e605 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -363,7 +363,7 @@ ip6t_do_table(struct sk_buff *skb,
 	cpu        = smp_processor_id();
 	table_base = private->entries[cpu];
 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
-	stackptr   = &private->stackptr[cpu];
+	stackptr   = per_cpu_ptr(private->stackptr, cpu);
 	origptr    = *stackptr;
 
 	e = get_entry(table_base, private->hook_entry[hook]);
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 294cbe8b0725..252d76199c41 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -814,7 +814,7 @@ struct dst_entry * ip6_route_output(struct net *net, struct sock *sk,
 {
 	int flags = 0;
 
-	if (fl->oif || rt6_need_strict(&fl->fl6_dst))
+	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl->fl6_dst))
 		flags |= RT6_LOOKUP_F_IFACE;
 
 	if (!ipv6_addr_any(&fl->fl6_src))
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index 5d218c530a4e..32be11e4c4d9 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -5,7 +5,7 @@
 #include <linux/nl80211.h>
 #include "ieee80211_i.h"
 
-enum ieee80211_chan_mode
+static enum ieee80211_chan_mode
 __ieee80211_get_channel_mode(struct ieee80211_local *local,
 			     struct ieee80211_sub_if_data *ignore)
 {
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 445de702b8b7..e34622fa0003 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -699,10 +699,8 @@ void xt_free_table_info(struct xt_table_info *info)
 		vfree(info->jumpstack);
 	else
 		kfree(info->jumpstack);
-	if (sizeof(unsigned int) * nr_cpu_ids > PAGE_SIZE)
-		vfree(info->stackptr);
-	else
-		kfree(info->stackptr);
+
+	free_percpu(info->stackptr);
 
 	kfree(info);
 }
@@ -753,14 +751,9 @@ static int xt_jumpstack_alloc(struct xt_table_info *i)
 	unsigned int size;
 	int cpu;
 
-	size = sizeof(unsigned int) * nr_cpu_ids;
-	if (size > PAGE_SIZE)
-		i->stackptr = vmalloc(size);
-	else
-		i->stackptr = kmalloc(size, GFP_KERNEL);
+	i->stackptr = alloc_percpu(unsigned int);
 	if (i->stackptr == NULL)
 		return -ENOMEM;
-	memset(i->stackptr, 0, size);
 
 	size = sizeof(void **) * nr_cpu_ids;
 	if (size > PAGE_SIZE)
@@ -844,10 +837,6 @@ struct xt_table *xt_register_table(struct net *net,
 	struct xt_table_info *private;
 	struct xt_table *t, *table;
 
-	ret = xt_jumpstack_alloc(newinfo);
-	if (ret < 0)
-		return ERR_PTR(ret);
-
 	/* Don't add one object to multiple lists. */
 	table = kmemdup(input_table, sizeof(struct xt_table), GFP_KERNEL);
 	if (!table) {
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
index 7b048a35ca58..94d72e85a475 100644
--- a/net/phonet/pep.c
+++ b/net/phonet/pep.c
@@ -1045,12 +1045,12 @@ static void pep_sock_unhash(struct sock *sk)
 	lock_sock(sk);
 	if ((1 << sk->sk_state) & ~(TCPF_CLOSE|TCPF_LISTEN)) {
 		skparent = pn->listener;
-		sk_del_node_init(sk);
 		release_sock(sk);
 
-		sk = skparent;
 		pn = pep_sk(skparent);
-		lock_sock(sk);
+		lock_sock(skparent);
+		sk_del_node_init(sk);
+		sk = skparent;
 	}
 	/* Unhash a listening sock only when it is closed
 	 * and all of its active connected pipes are closed. */
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 10ed0d55f759..f68832798db2 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -475,6 +475,7 @@ int rds_ib_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_ib_setup_qp(conn);
 	if (err) {
 		rds_ib_conn_error(conn, "rds_ib_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index a9d951b4fbae..b5dd6ac39be8 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -452,6 +452,7 @@ int rds_iw_cm_handle_connect(struct rdma_cm_id *cm_id,
 	err = rds_iw_setup_qp(conn);
 	if (err) {
 		rds_iw_conn_error(conn, "rds_iw_setup_qp failed (%d)\n", err);
+		mutex_unlock(&conn->c_cm_lock);
 		goto out;
 	}
 
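Note on the net/core/skbuff.c hunk above: the newly exported sock_queue_err_skb() charges the error skb against sk_rmem_alloc and releases that charge through the sock_rmem_free destructor, returning -ENOMEM when the socket's receive buffer has no room. A minimal caller sketch, mirroring the skb_tstamp_tx() usage shown in the diff (the function example_queue_error and its surrounding context are illustrative only, not part of this patch):

#include <linux/skbuff.h>
#include <net/sock.h>

/* Illustrative sketch: hand an already-built error skb to a socket's
 * error queue; if queueing fails, the caller still owns the skb and
 * must free it, exactly as skb_tstamp_tx() does in the hunk above.
 */
static void example_queue_error(struct sock *sk, struct sk_buff *skb)
{
	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}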