Diffstat (limited to 'net')
51 files changed, 402 insertions, 302 deletions
diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 9d52ebfc1962..05fafdc2eea3 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -188,10 +188,13 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct br2684_dev *brdev, return 0; } } - } else { - skb_push(skb, 2); - if (brdev->payload == p_bridged) + } else { /* e_vc */ + if (brdev->payload == p_bridged) { + skb_push(skb, 2); memset(skb->data, 0, 2); + } else { /* p_routed */ + skb_pull(skb, ETH_HLEN); + } } skb_debug(skb); @@ -377,11 +380,8 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) (skb->data + 6, ethertype_ipv4, sizeof(ethertype_ipv4)) == 0) skb->protocol = __constant_htons(ETH_P_IP); - else { - brdev->stats.rx_errors++; - dev_kfree_skb(skb); - return; - } + else + goto error; skb_pull(skb, sizeof(llc_oui_ipv4)); skb_reset_network_header(skb); skb->pkt_type = PACKET_HOST; @@ -394,44 +394,56 @@ static void br2684_push(struct atm_vcc *atmvcc, struct sk_buff *skb) (memcmp(skb->data, llc_oui_pid_pad, 7) == 0)) { skb_pull(skb, sizeof(llc_oui_pid_pad)); skb->protocol = eth_type_trans(skb, net_dev); - } else { - brdev->stats.rx_errors++; - dev_kfree_skb(skb); - return; - } + } else + goto error; - } else { - /* first 2 chars should be 0 */ - if (*((u16 *) (skb->data)) != 0) { - brdev->stats.rx_errors++; - dev_kfree_skb(skb); - return; + } else { /* e_vc */ + if (brdev->payload == p_routed) { + struct iphdr *iph; + + skb_reset_network_header(skb); + iph = ip_hdr(skb); + if (iph->version == 4) + skb->protocol = __constant_htons(ETH_P_IP); + else if (iph->version == 6) + skb->protocol = __constant_htons(ETH_P_IPV6); + else + goto error; + skb->pkt_type = PACKET_HOST; + } else { /* p_bridged */ + /* first 2 chars should be 0 */ + if (*((u16 *) (skb->data)) != 0) + goto error; + skb_pull(skb, BR2684_PAD_LEN); + skb->protocol = eth_type_trans(skb, net_dev); } - skb_pull(skb, BR2684_PAD_LEN + ETH_HLEN); /* pad, dstmac, srcmac, ethtype */ - skb->protocol = eth_type_trans(skb, net_dev); } #ifdef CONFIG_ATM_BR2684_IPFILTER - if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb))) { - brdev->stats.rx_dropped++; - dev_kfree_skb(skb); - return; - } + if (unlikely(packet_fails_filter(skb->protocol, brvcc, skb))) + goto dropped; #endif /* CONFIG_ATM_BR2684_IPFILTER */ skb->dev = net_dev; ATM_SKB(skb)->vcc = atmvcc; /* needed ? */ pr_debug("received packet's protocol: %x\n", ntohs(skb->protocol)); skb_debug(skb); - if (unlikely(!(net_dev->flags & IFF_UP))) { - /* sigh, interface is down */ - brdev->stats.rx_dropped++; - dev_kfree_skb(skb); - return; - } + /* sigh, interface is down? 
*/ + if (unlikely(!(net_dev->flags & IFF_UP))) + goto dropped; brdev->stats.rx_packets++; brdev->stats.rx_bytes += skb->len; memset(ATM_SKB(skb), 0, sizeof(struct atm_skb_data)); netif_rx(skb); + return; + +dropped: + brdev->stats.rx_dropped++; + goto free_skb; +error: + brdev->stats.rx_errors++; +free_skb: + dev_kfree_skb(skb); + return; } /* @@ -518,9 +530,9 @@ static int br2684_regvcc(struct atm_vcc *atmvcc, void __user * arg) struct sk_buff *next = skb->next; skb->next = skb->prev = NULL; + br2684_push(atmvcc, skb); BRPRIV(skb->dev)->stats.rx_bytes -= skb->len; BRPRIV(skb->dev)->stats.rx_packets--; - br2684_push(atmvcc, skb); skb = next; } diff --git a/net/core/dev.c b/net/core/dev.c index 582963077877..c421a1f8f0b9 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -119,6 +119,7 @@ #include <linux/err.h> #include <linux/ctype.h> #include <linux/if_arp.h> +#include <linux/if_vlan.h> #include "net-sysfs.h" @@ -1362,6 +1363,29 @@ void netif_device_attach(struct net_device *dev) } EXPORT_SYMBOL(netif_device_attach); +static bool can_checksum_protocol(unsigned long features, __be16 protocol) +{ + return ((features & NETIF_F_GEN_CSUM) || + ((features & NETIF_F_IP_CSUM) && + protocol == htons(ETH_P_IP)) || + ((features & NETIF_F_IPV6_CSUM) && + protocol == htons(ETH_P_IPV6))); +} + +static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb) +{ + if (can_checksum_protocol(dev->features, skb->protocol)) + return true; + + if (skb->protocol == htons(ETH_P_8021Q)) { + struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + if (can_checksum_protocol(dev->features & dev->vlan_features, + veh->h_vlan_encapsulated_proto)) + return true; + } + + return false; +} /* * Invalidate hardware checksum when packet is to be mangled, and @@ -1640,14 +1664,8 @@ int dev_queue_xmit(struct sk_buff *skb) if (skb->ip_summed == CHECKSUM_PARTIAL) { skb_set_transport_header(skb, skb->csum_start - skb_headroom(skb)); - - if (!(dev->features & NETIF_F_GEN_CSUM) && - !((dev->features & NETIF_F_IP_CSUM) && - skb->protocol == htons(ETH_P_IP)) && - !((dev->features & NETIF_F_IPV6_CSUM) && - skb->protocol == htons(ETH_P_IPV6))) - if (skb_checksum_help(skb)) - goto out_kfree_skb; + if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb)) + goto out_kfree_skb; } gso: @@ -2059,6 +2077,10 @@ int netif_receive_skb(struct sk_buff *skb) rcu_read_lock(); + /* Don't receive packets in an exiting network namespace */ + if (!net_alive(dev_net(skb->dev))) + goto out; + #ifdef CONFIG_NET_CLS_ACT if (skb->tc_verd & TC_NCLS) { skb->tc_verd = CLR_TC_NCLS(skb->tc_verd); diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index 72b4c184dd84..7c52fe277b62 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -140,6 +140,9 @@ static void cleanup_net(struct work_struct *work) struct pernet_operations *ops; struct net *net; + /* Be very certain incoming network packets will not find us */ + rcu_barrier(); + net = container_of(work, struct net, work); mutex_lock(&net_mutex); diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index 6de4bd195d28..1e8be246ad15 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -290,12 +290,12 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, while (1) { const u8 len = dccp_ackvec_len(av, index); - const u8 state = dccp_ackvec_state(av, index); + const u8 av_state = dccp_ackvec_state(av, index); /* * valid packets not yet in av_buf have a reserved * entry, with a len equal to 0. 
*/ - if (state == DCCP_ACKVEC_STATE_NOT_RECEIVED && + if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED && len == 0 && delta == 0) { /* Found our reserved seat! */ dccp_pr_debug("Found %llu reserved seat!\n", @@ -325,31 +325,6 @@ out_duplicate: return -EILSEQ; } -#ifdef CONFIG_IP_DCCP_DEBUG -void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len) -{ - dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len, - (unsigned long long)ackno); - - while (len--) { - const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6; - const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK; - - dccp_pr_debug_cat("%d,%d|", state, rl); - ++vector; - } - - dccp_pr_debug_cat("\n"); -} - -void dccp_ackvec_print(const struct dccp_ackvec *av) -{ - dccp_ackvector_print(av->av_buf_ackno, - av->av_buf + av->av_buf_head, - av->av_vec_len); -} -#endif - static void dccp_ackvec_throw_record(struct dccp_ackvec *av, struct dccp_ackvec_record *avr) { diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c index f813077234b7..a1929f33d703 100644 --- a/net/dccp/ccids/ccid3.c +++ b/net/dccp/ccids/ccid3.c @@ -159,8 +159,8 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) } else if (ktime_us_delta(now, hctx->ccid3hctx_t_ld) - (s64)hctx->ccid3hctx_rtt >= 0) { - hctx->ccid3hctx_x = - max(min(2 * hctx->ccid3hctx_x, min_rate), + hctx->ccid3hctx_x = min(2 * hctx->ccid3hctx_x, min_rate); + hctx->ccid3hctx_x = max(hctx->ccid3hctx_x, scaled_div(((__u64)hctx->ccid3hctx_s) << 6, hctx->ccid3hctx_rtt)); hctx->ccid3hctx_t_ld = now; @@ -329,8 +329,14 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) hctx->ccid3hctx_x = rfc3390_initial_rate(sk); hctx->ccid3hctx_t_ld = now; } else { - /* Sender does not have RTT sample: X_pps = 1 pkt/sec */ - hctx->ccid3hctx_x = hctx->ccid3hctx_s; + /* + * Sender does not have RTT sample: + * - set fallback RTT (RFC 4340, 3.4) since a RTT value + * is needed in several parts (e.g. window counter); + * - set sending rate X_pps = 1pps as per RFC 3448, 4.2. + */ + hctx->ccid3hctx_rtt = DCCP_FALLBACK_RTT; + hctx->ccid3hctx_x = hctx->ccid3hctx_s; hctx->ccid3hctx_x <<= 6; } ccid3_update_send_interval(hctx); diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c index d1dfbb8de64c..97ecec0a8e76 100644 --- a/net/dccp/ccids/lib/tfrc.c +++ b/net/dccp/ccids/lib/tfrc.c @@ -14,14 +14,6 @@ module_param(tfrc_debug, bool, 0444); MODULE_PARM_DESC(tfrc_debug, "Enable debug messages"); #endif -extern int tfrc_tx_packet_history_init(void); -extern void tfrc_tx_packet_history_exit(void); -extern int tfrc_rx_packet_history_init(void); -extern void tfrc_rx_packet_history_exit(void); - -extern int tfrc_li_init(void); -extern void tfrc_li_exit(void); - static int __init tfrc_module_init(void) { int rc = tfrc_li_init(); diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h index 1fb1187bbf1c..ed9857527acf 100644 --- a/net/dccp/ccids/lib/tfrc.h +++ b/net/dccp/ccids/lib/tfrc.h @@ -15,7 +15,7 @@ * (at your option) any later version. 
*/ #include <linux/types.h> -#include <asm/div64.h> +#include <linux/math64.h> #include "../../dccp.h" /* internal includes that this module exports: */ #include "loss_interval.h" @@ -29,21 +29,19 @@ extern int tfrc_debug; #endif /* integer-arithmetic divisions of type (a * 1000000)/b */ -static inline u64 scaled_div(u64 a, u32 b) +static inline u64 scaled_div(u64 a, u64 b) { BUG_ON(b==0); - a *= 1000000; - do_div(a, b); - return a; + return div64_u64(a * 1000000, b); } -static inline u32 scaled_div32(u64 a, u32 b) +static inline u32 scaled_div32(u64 a, u64 b) { u64 result = scaled_div(a, b); if (result > UINT_MAX) { - DCCP_CRIT("Overflow: a(%llu)/b(%u) > ~0U", - (unsigned long long)a, b); + DCCP_CRIT("Overflow: %llu/%llu > UINT_MAX", + (unsigned long long)a, (unsigned long long)b); return UINT_MAX; } return result; @@ -58,7 +56,14 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight) return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval; } -extern u32 tfrc_calc_x(u16 s, u32 R, u32 p); -extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue); +extern u32 tfrc_calc_x(u16 s, u32 R, u32 p); +extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue); +extern int tfrc_tx_packet_history_init(void); +extern void tfrc_tx_packet_history_exit(void); +extern int tfrc_rx_packet_history_init(void); +extern void tfrc_rx_packet_history_exit(void); + +extern int tfrc_li_init(void); +extern void tfrc_li_exit(void); #endif /* _TFRC_H_ */ diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c index e4e64b76c10c..2f20a29cffe4 100644 --- a/net/dccp/ccids/lib/tfrc_equation.c +++ b/net/dccp/ccids/lib/tfrc_equation.c @@ -661,7 +661,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p) EXPORT_SYMBOL_GPL(tfrc_calc_x); -/* +/** * tfrc_calc_x_reverse_lookup - try to find p given f(p) * * @fvalue: function value to match, scaled by 1000000 @@ -676,11 +676,11 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue) /* Error cases. 
*/ if (fvalue < tfrc_calc_x_lookup[0][1]) { - DCCP_WARN("fvalue %d smaller than resolution\n", fvalue); - return tfrc_calc_x_lookup[0][1]; + DCCP_WARN("fvalue %u smaller than resolution\n", fvalue); + return TFRC_SMALLEST_P; } if (fvalue > tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][0]) { - DCCP_WARN("fvalue %d exceeds bounds!\n", fvalue); + DCCP_WARN("fvalue %u exceeds bounds!\n", fvalue); return 1000000; } diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c index c22a3780c14e..37d27bcb361f 100644 --- a/net/dccp/ipv4.c +++ b/net/dccp/ipv4.c @@ -589,7 +589,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; - req = reqsk_alloc(&dccp_request_sock_ops); + req = inet_reqsk_alloc(&dccp_request_sock_ops); if (req == NULL) goto drop; @@ -605,7 +605,6 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb) ireq = inet_rsk(req); ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; - ireq->opt = NULL; /* * Step 3: Process LISTEN state diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c index 9b1129bb7ece..f7fe2a572d7b 100644 --- a/net/dccp/ipv6.c +++ b/net/dccp/ipv6.c @@ -421,7 +421,6 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb) ireq6 = inet6_rsk(req); ipv6_addr_copy(&ireq6->rmt_addr, &ipv6_hdr(skb)->saddr); ipv6_addr_copy(&ireq6->loc_addr, &ipv6_hdr(skb)->daddr); - ireq6->pktopts = NULL; if (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo || diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 33ad48321b08..66dca5bba858 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -165,12 +165,12 @@ out_free: /* See dccp_v4_conn_request */ newdmsk->dccpms_sequence_window = req->rcv_wnd; - newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr; - dccp_update_gsr(newsk, dreq->dreq_isr); - - newdp->dccps_iss = dreq->dreq_iss; + newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss; dccp_update_gss(newsk, dreq->dreq_iss); + newdp->dccps_isr = dreq->dreq_isr; + dccp_update_gsr(newsk, dreq->dreq_isr); + /* * SWL and AWL are initially adjusted so that they are not less than * the initial Sequence Numbers received and sent, respectively: diff --git a/net/dccp/options.c b/net/dccp/options.c index d2a84a2fecee..43bc24e761d0 100644 --- a/net/dccp/options.c +++ b/net/dccp/options.c @@ -107,9 +107,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, * * CCID-specific options are ignored during connection setup, as * negotiation may still be in progress (see RFC 4340, 10.3). + * The same applies to Ack Vectors, as these depend on the CCID. * */ - if (dreq != NULL && opt >= 128) + if (dreq != NULL && (opt >= 128 || + opt == DCCPO_ACK_VECTOR_0 || opt == DCCPO_ACK_VECTOR_1)) goto ignore_option; switch (opt) { diff --git a/net/dccp/output.c b/net/dccp/output.c index 1f8a9b64c083..fe20068c5d8e 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -508,6 +508,7 @@ void dccp_send_ack(struct sock *sk) EXPORT_SYMBOL_GPL(dccp_send_ack); +#if 0 /* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. 
*/ void dccp_send_delayed_ack(struct sock *sk) { @@ -538,6 +539,7 @@ void dccp_send_delayed_ack(struct sock *sk) icsk->icsk_ack.timeout = timeout; sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); } +#endif void dccp_send_sync(struct sock *sk, const u64 ackno, const enum dccp_pkt_type pkt_type) diff --git a/net/dccp/probe.c b/net/dccp/probe.c index 0bcdc9250279..81368a7f5379 100644 --- a/net/dccp/probe.c +++ b/net/dccp/probe.c @@ -42,7 +42,7 @@ static int bufsize = 64 * 1024; static const char procname[] = "dccpprobe"; -struct { +static struct { struct kfifo *fifo; spinlock_t lock; wait_queue_head_t wait; diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index 3b83c34019fc..0d4d72827e4b 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -960,7 +960,10 @@ int fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, rtm->rtm_dst_len = dst_len; rtm->rtm_src_len = 0; rtm->rtm_tos = tos; - rtm->rtm_table = tb_id; + if (tb_id < 256) + rtm->rtm_table = tb_id; + else + rtm->rtm_table = RT_TABLE_COMPAT; NLA_PUT_U32(skb, RTA_TABLE, tb_id); rtm->rtm_type = type; rtm->rtm_flags = fi->fib_flags; diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 828ea211ff21..ec834480abe7 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -419,7 +419,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, struct inet_connection_sock *icsk = inet_csk(parent); struct request_sock_queue *queue = &icsk->icsk_accept_queue; struct listen_sock *lopt = queue->listen_opt; - int thresh = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; + int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries; + int thresh = max_retries; unsigned long now = jiffies; struct request_sock **reqp, *req; int i, budget; @@ -455,6 +456,9 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, } } + if (queue->rskq_defer_accept) + max_retries = queue->rskq_defer_accept; + budget = 2 * (lopt->nr_table_entries / (timeout / interval)); i = lopt->clock_hand; @@ -462,8 +466,9 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, reqp=&lopt->syn_table[i]; while ((req = *reqp) != NULL) { if (time_after_eq(now, req->expires)) { - if (req->retrans < thresh && - !req->rsk_ops->rtx_syn_ack(parent, req)) { + if ((req->retrans < thresh || + (inet_rsk(req)->acked && req->retrans < max_retries)) + && !req->rsk_ops->rtx_syn_ack(parent, req)) { unsigned long timeo; if (req->retrans++ == 0) diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 04578593e100..d2a887fc8d9b 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -556,7 +556,6 @@ static void nf_nat_cleanup_conntrack(struct nf_conn *ct) spin_lock_bh(&nf_nat_lock); hlist_del_rcu(&nat->bysource); - nat->ct = NULL; spin_unlock_bh(&nf_nat_lock); } @@ -570,8 +569,8 @@ static void nf_nat_move_storage(void *new, void *old) return; spin_lock_bh(&nf_nat_lock); - hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource); new_nat->ct = ct; + hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource); spin_unlock_bh(&nf_nat_lock); } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index e7e091d365ff..37a1ecd9d600 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -934,7 +934,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i) srcp = inet->num; seq_printf(seq, "%4d: %08X:%04X %08X:%04X" - " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d", + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p 
%d\n", i, src, srcp, dest, destp, sp->sk_state, atomic_read(&sp->sk_wmem_alloc), atomic_read(&sp->sk_rmem_alloc), diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 73ba98921d64..d182a2a26291 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -285,7 +285,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, cookie_check_timestamp(&tcp_opt); ret = NULL; - req = reqsk_alloc(&tcp_request_sock_ops); /* for safety */ + req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ if (!req) goto out; @@ -301,7 +301,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, ireq->rmt_port = th->source; ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; - ireq->opt = NULL; ireq->snd_wscale = tcp_opt.snd_wscale; ireq->rcv_wscale = tcp_opt.rcv_wscale; ireq->sack_ok = tcp_opt.sack_ok; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index ab66683b8043..fc54a48fde1e 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2112,12 +2112,15 @@ static int do_tcp_setsockopt(struct sock *sk, int level, break; case TCP_DEFER_ACCEPT: - if (val < 0) { - err = -EINVAL; - } else { - if (val > MAX_TCP_ACCEPT_DEFERRED) - val = MAX_TCP_ACCEPT_DEFERRED; - icsk->icsk_accept_queue.rskq_defer_accept = val; + icsk->icsk_accept_queue.rskq_defer_accept = 0; + if (val > 0) { + /* Translate value in seconds to number of + * retransmits */ + while (icsk->icsk_accept_queue.rskq_defer_accept < 32 && + val > ((TCP_TIMEOUT_INIT / HZ) << + icsk->icsk_accept_queue.rskq_defer_accept)) + icsk->icsk_accept_queue.rskq_defer_accept++; + icsk->icsk_accept_queue.rskq_defer_accept++; } break; @@ -2299,7 +2302,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level, val = (val ? : sysctl_tcp_fin_timeout) / HZ; break; case TCP_DEFER_ACCEPT: - val = icsk->icsk_accept_queue.rskq_defer_accept; + val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 : + ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1)); break; case TCP_WINDOW_CLAMP: val = tp->window_clamp; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eba873e9b560..cad73b7dfef0 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4541,49 +4541,6 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th) } } -static int tcp_defer_accept_check(struct sock *sk) -{ - struct tcp_sock *tp = tcp_sk(sk); - - if (tp->defer_tcp_accept.request) { - int queued_data = tp->rcv_nxt - tp->copied_seq; - int hasfin = !skb_queue_empty(&sk->sk_receive_queue) ? 
- tcp_hdr((struct sk_buff *) - sk->sk_receive_queue.prev)->fin : 0; - - if (queued_data && hasfin) - queued_data--; - - if (queued_data && - tp->defer_tcp_accept.listen_sk->sk_state == TCP_LISTEN) { - if (sock_flag(sk, SOCK_KEEPOPEN)) { - inet_csk_reset_keepalive_timer(sk, - keepalive_time_when(tp)); - } else { - inet_csk_delete_keepalive_timer(sk); - } - - inet_csk_reqsk_queue_add( - tp->defer_tcp_accept.listen_sk, - tp->defer_tcp_accept.request, - sk); - - tp->defer_tcp_accept.listen_sk->sk_data_ready( - tp->defer_tcp_accept.listen_sk, 0); - - sock_put(tp->defer_tcp_accept.listen_sk); - sock_put(sk); - tp->defer_tcp_accept.listen_sk = NULL; - tp->defer_tcp_accept.request = NULL; - } else if (hasfin || - tp->defer_tcp_accept.listen_sk->sk_state != TCP_LISTEN) { - tcp_reset(sk); - return -1; - } - } - return 0; -} - static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen) { struct tcp_sock *tp = tcp_sk(sk); @@ -4944,8 +4901,6 @@ step5: tcp_data_snd_check(sk); tcp_ack_snd_check(sk); - - tcp_defer_accept_check(sk); return 0; csum_error: diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index cd601a866c2f..12695be2c255 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -85,10 +85,6 @@ int sysctl_tcp_tw_reuse __read_mostly; int sysctl_tcp_low_latency __read_mostly; -/* Check TCP sequence numbers in ICMP packets. */ -#define ICMP_MIN_LENGTH 8 - -void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); #ifdef CONFIG_TCP_MD5SIG static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, @@ -1285,7 +1281,7 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb) if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) goto drop; - req = reqsk_alloc(&tcp_request_sock_ops); + req = inet_reqsk_alloc(&tcp_request_sock_ops); if (!req) goto drop; @@ -1918,14 +1914,6 @@ int tcp_v4_destroy_sock(struct sock *sk) sk->sk_sndmsg_page = NULL; } - if (tp->defer_tcp_accept.request) { - reqsk_free(tp->defer_tcp_accept.request); - sock_put(tp->defer_tcp_accept.listen_sk); - sock_put(sk); - tp->defer_tcp_accept.listen_sk = NULL; - tp->defer_tcp_accept.request = NULL; - } - atomic_dec(&tcp_sockets_allocated); return 0; diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 019c8c16e5cc..8245247a6ceb 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -571,8 +571,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, does sequence test, SYN is truncated, and thus we consider it a bare ACK. - Both ends (listening sockets) accept the new incoming - connection and try to talk to each other. 8-) + If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this + bare ACK. Otherwise, we create an established connection. Both + ends (listening sockets) accept the new incoming connection and try + to talk to each other. 8-) Note: This case is both harmless, and rare. Possibility is about the same as us discovering intelligent life on another plant tomorrow. @@ -640,6 +642,13 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, if (!(flg & TCP_FLAG_ACK)) return NULL; + /* If TCP_DEFER_ACCEPT is set, drop bare ACK. */ + if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && + TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { + inet_rsk(req)->acked = 1; + return NULL; + } + /* OK, ACK is valid, create big socket and * feed this segment to it. It will repeat all * the tests. 
THIS SEGMENT MUST MOVE SOCKET TO @@ -678,24 +687,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb, inet_csk_reqsk_queue_unlink(sk, req, prev); inet_csk_reqsk_queue_removed(sk, req); - if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && - TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { - - /* the accept queue handling is done is est recv slow - * path so lets make sure to start there - */ - tcp_sk(child)->pred_flags = 0; - sock_hold(sk); - sock_hold(child); - tcp_sk(child)->defer_tcp_accept.listen_sk = sk; - tcp_sk(child)->defer_tcp_accept.request = req; - - inet_csk_reset_keepalive_timer(child, - inet_csk(sk)->icsk_accept_queue.rskq_defer_accept * HZ); - } else { - inet_csk_reqsk_queue_add(sk, req, child); - } - + inet_csk_reqsk_queue_add(sk, req, child); return child; listen_overflow: diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 4de68cf5f2aa..63ed9d6830e7 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -489,11 +489,6 @@ static void tcp_keepalive_timer (unsigned long data) goto death; } - if (tp->defer_tcp_accept.request && sk->sk_state == TCP_ESTABLISHED) { - tcp_send_active_reset(sk, GFP_ATOMIC); - goto death; - } - if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE) goto out; diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index 584e6d74e3a9..7135279f3f84 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -52,7 +52,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) IP_ECN_clear(top_iph); top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? - 0 : XFRM_MODE_SKB_CB(skb)->frag_off; + 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); ip_select_ident(top_iph, dst->child, NULL); top_iph->ttl = dst_metric(dst->child, RTAX_HOPLIMIT); diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 3c6aafb02183..e84b3fd17fb4 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -191,7 +191,7 @@ lookup_protocol: np->mcast_hops = -1; np->mc_loop = 1; np->pmtudisc = IPV6_PMTUDISC_WANT; - np->ipv6only = init_net.ipv6.sysctl.bindv6only; + np->ipv6only = net->ipv6.sysctl.bindv6only; /* Init the ipv4 part of the socket since we can have sockets * using v6 API for ipv4. diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index b9c2de84a8a2..0f0f94a40335 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -705,6 +705,11 @@ int datagram_send_ctl(struct net *net, } *hlimit = *(int *)CMSG_DATA(cmsg); + if (*hlimit < -1 || *hlimit > 0xff) { + err = -EINVAL; + goto exit_f; + } + break; case IPV6_TCLASS: diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index 4e5c8615832c..17eb48b8e329 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -102,6 +102,15 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt if (hdr->version != 6) goto err; + /* + * RFC4291 2.5.3 + * A packet received on an interface with a destination address + * of loopback must be dropped. 
+ */ + if (!(dev->flags & IFF_LOOPBACK) && + ipv6_addr_loopback(&hdr->daddr)) + goto err; + skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 26b83e512a09..86e28a75267f 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -67,7 +67,7 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *)) /* RA packet may be delivered ONLY to IPPROTO_RAW socket */ if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW) - return -EINVAL; + return -ENOPROTOOPT; new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL; @@ -345,18 +345,21 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname, case IPV6_DSTOPTS: { struct ipv6_txoptions *opt; + + /* remove any sticky options header with a zero option + * length, per RFC3542. + */ if (optlen == 0) optval = NULL; + else if (optlen < sizeof(struct ipv6_opt_hdr) || + optlen & 0x7 || optlen > 8 * 255) + goto e_inval; /* hop-by-hop / destination options are privileged option */ retv = -EPERM; if (optname != IPV6_RTHDR && !capable(CAP_NET_RAW)) break; - if (optlen < sizeof(struct ipv6_opt_hdr) || - optlen & 0x7 || optlen > 8 * 255) - goto e_inval; - opt = ipv6_renew_options(sk, np->opt, optname, (struct ipv6_opt_hdr __user *)optval, optlen); @@ -446,7 +449,7 @@ done: case IPV6_MULTICAST_HOPS: if (sk->sk_type == SOCK_STREAM) - goto e_inval; + break; if (optlen < sizeof(int)) goto e_inval; if (val > 255 || val < -1) @@ -458,13 +461,15 @@ done: case IPV6_MULTICAST_LOOP: if (optlen < sizeof(int)) goto e_inval; + if (val != valbool) + goto e_inval; np->mc_loop = valbool; retv = 0; break; case IPV6_MULTICAST_IF: if (sk->sk_type == SOCK_STREAM) - goto e_inval; + break; if (optlen < sizeof(int)) goto e_inval; @@ -860,7 +865,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, if (sk->sk_protocol != IPPROTO_UDP && sk->sk_protocol != IPPROTO_UDPLITE && sk->sk_protocol != IPPROTO_TCP) - return -EINVAL; + return -ENOPROTOOPT; if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; val = sk->sk_family; @@ -874,6 +879,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, return -EINVAL; if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0))) return -EFAULT; + if (gsf.gf_group.ss_family != AF_INET6) + return -EADDRNOTAVAIL; lock_sock(sk); err = ip6_mc_msfget(sk, &gsf, (struct group_filter __user *)optval, optlen); diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 8fee9a15b2d3..3aee12310d94 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1169,7 +1169,8 @@ static int raw6_destroy(struct sock *sk) lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk); - return 0; + + return inet6_destroy_sock(sk); } static int rawv6_init_sk(struct sock *sk) @@ -1200,7 +1201,6 @@ struct proto rawv6_prot = { .disconnect = udp_disconnect, .ioctl = rawv6_ioctl, .init = rawv6_init_sk, - .destroy = inet6_destroy_sock, .setsockopt = rawv6_setsockopt, .getsockopt = rawv6_getsockopt, .sendmsg = rawv6_sendmsg, diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 220cffe9e63b..d1f3e19b06c7 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -2196,8 +2196,12 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt, NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric); - expires = (rt->rt6i_flags & RTF_EXPIRES) ? 
- rt->rt6i_expires - jiffies : 0; + if (!(rt->rt6i_flags & RTF_EXPIRES)) + expires = 0; + else if (rt->rt6i_expires - jiffies < INT_MAX) + expires = rt->rt6i_expires - jiffies; + else + expires = INT_MAX; if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0, expires, rt->u.dst.error) < 0) diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 3de6ffdaedf2..32e871a6c25a 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -222,15 +222,18 @@ __ipip6_tunnel_locate_prl(struct ip_tunnel *t, __be32 addr) } -static int ipip6_tunnel_get_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) +static int ipip6_tunnel_get_prl(struct ip_tunnel *t, + struct ip_tunnel_prl __user *a) { - struct ip_tunnel_prl *kp; + struct ip_tunnel_prl kprl, *kp; struct ip_tunnel_prl_entry *prl; unsigned int cmax, c = 0, ca, len; int ret = 0; - cmax = a->datalen / sizeof(*a); - if (cmax > 1 && a->addr != htonl(INADDR_ANY)) + if (copy_from_user(&kprl, a, sizeof(kprl))) + return -EFAULT; + cmax = kprl.datalen / sizeof(kprl); + if (cmax > 1 && kprl.addr != htonl(INADDR_ANY)) cmax = 1; /* For simple GET or for root users, @@ -261,26 +264,25 @@ static int ipip6_tunnel_get_prl(struct ip_tunnel *t, struct ip_tunnel_prl *a) for (prl = t->prl; prl; prl = prl->next) { if (c > cmax) break; - if (a->addr != htonl(INADDR_ANY) && prl->addr != a->addr) + if (kprl.addr != htonl(INADDR_ANY) && prl->addr != kprl.addr) continue; kp[c].addr = prl->addr; kp[c].flags = prl->flags; c++; - if (a->addr != htonl(INADDR_ANY)) + if (kprl.addr != htonl(INADDR_ANY)) break; } out: read_unlock(&ipip6_lock); len = sizeof(*kp) * c; - ret = len ? copy_to_user(a->data, kp, len) : 0; + ret = 0; + if ((len && copy_to_user(a + 1, kp, len)) || put_user(len, &a->datalen)) + ret = -EFAULT; kfree(kp); - if (ret) - return -EFAULT; - a->datalen = len; - return 0; + return ret; } static int @@ -873,11 +875,20 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) break; case SIOCGETPRL: + err = -EINVAL; + if (dev == sitn->fb_tunnel_dev) + goto done; + err = -ENOENT; + if (!(t = netdev_priv(dev))) + goto done; + err = ipip6_tunnel_get_prl(t, ifr->ifr_ifru.ifru_data); + break; + case SIOCADDPRL: case SIOCDELPRL: case SIOCCHGPRL: err = -EPERM; - if (cmd != SIOCGETPRL && !capable(CAP_NET_ADMIN)) + if (!capable(CAP_NET_ADMIN)) goto done; err = -EINVAL; if (dev == sitn->fb_tunnel_dev) @@ -890,12 +901,6 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) goto done; switch (cmd) { - case SIOCGETPRL: - err = ipip6_tunnel_get_prl(t, &prl); - if (!err && copy_to_user(ifr->ifr_ifru.ifru_data, - &prl, sizeof(prl))) - err = -EFAULT; - break; case SIOCDELPRL: err = ipip6_tunnel_del_prl(t, &prl); break; @@ -904,8 +909,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd) err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL); break; } - if (cmd != SIOCGETPRL) - netdev_state_change(dev); + netdev_state_change(dev); break; default: diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 938ce4ecde55..3ecc1157994e 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -198,7 +198,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq = inet_rsk(req); ireq6 = inet6_rsk(req); treq = tcp_rsk(req); - ireq6->pktopts = NULL; if (security_inet_conn_request(sk, skb, req)) { reqsk_free(req); diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 715965f0fac0..cb46749d4c32 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1299,7 +1299,6 @@ static int tcp_v6_conn_request(struct sock 
*sk, struct sk_buff *skb) treq = inet6_rsk(req); ipv6_addr_copy(&treq->rmt_addr, &ipv6_hdr(skb)->saddr); ipv6_addr_copy(&treq->loc_addr, &ipv6_hdr(skb)->daddr); - treq->pktopts = NULL; if (!want_cookie) TCP_ECN_create_request(req, tcp_hdr(skb)); diff --git a/net/key/af_key.c b/net/key/af_key.c index 9bba7ac5fee0..7470e367272b 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -3030,6 +3030,9 @@ static int key_notify_sa_expire(struct xfrm_state *x, struct km_event *c) static int pfkey_send_notify(struct xfrm_state *x, struct km_event *c) { + if (atomic_read(&pfkey_socks_nr) == 0) + return 0; + switch (c->event) { case XFRM_MSG_EXPIRE: return key_notify_sa_expire(x, c); diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index c7314bf4bec2..006486b26726 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -899,7 +899,7 @@ extern const struct iw_handler_def ieee80211_iw_handler_def; /* ieee80211_ioctl.c */ -int ieee80211_set_freq(struct ieee80211_local *local, int freq); +int ieee80211_set_freq(struct net_device *dev, int freq); /* ieee80211_sta.c */ void ieee80211_sta_timer(unsigned long data); void ieee80211_sta_work(struct work_struct *work); diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 5c876450b14c..98c0b5e56ecc 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -511,6 +511,7 @@ static int ieee80211_stop(struct net_device *dev) case IEEE80211_IF_TYPE_STA: case IEEE80211_IF_TYPE_IBSS: sdata->u.sta.state = IEEE80211_DISABLED; + memset(sdata->u.sta.bssid, 0, ETH_ALEN); del_timer_sync(&sdata->u.sta.timer); /* * When we get here, the interface is marked down. diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 841278f1df8e..4d2b582dd055 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -44,7 +44,7 @@ #define IEEE80211_RETRY_AUTH_INTERVAL (1 * HZ) #define IEEE80211_SCAN_INTERVAL (2 * HZ) #define IEEE80211_SCAN_INTERVAL_SLOW (15 * HZ) -#define IEEE80211_IBSS_JOIN_TIMEOUT (20 * HZ) +#define IEEE80211_IBSS_JOIN_TIMEOUT (7 * HZ) #define IEEE80211_PROBE_DELAY (HZ / 33) #define IEEE80211_CHANNEL_TIME (HZ / 33) @@ -2336,6 +2336,7 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, u8 *pos; struct ieee80211_sub_if_data *sdata; struct ieee80211_supported_band *sband; + union iwreq_data wrqu; sband = local->hw.wiphy->bands[local->hw.conf.channel->band]; @@ -2358,13 +2359,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, sdata->drop_unencrypted = bss->capability & WLAN_CAPABILITY_PRIVACY ? 
1 : 0; - res = ieee80211_set_freq(local, bss->freq); + res = ieee80211_set_freq(dev, bss->freq); - if (local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS) { - printk(KERN_DEBUG "%s: IBSS not allowed on frequency " - "%d MHz\n", dev->name, local->oper_channel->center_freq); - return -1; - } + if (res) + return res; /* Set beacon template */ skb = dev_alloc_skb(local->hw.extra_tx_headroom + 400); @@ -2479,6 +2477,10 @@ static int ieee80211_sta_join_ibss(struct net_device *dev, ifsta->state = IEEE80211_IBSS_JOINED; mod_timer(&ifsta->timer, jiffies + IEEE80211_IBSS_MERGE_INTERVAL); + memset(&wrqu, 0, sizeof(wrqu)); + memcpy(wrqu.ap_addr.sa_data, bss->bssid, ETH_ALEN); + wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); + return res; } @@ -3486,7 +3488,7 @@ static int ieee80211_sta_config_auth(struct net_device *dev, spin_unlock_bh(&local->sta_bss_lock); if (selected) { - ieee80211_set_freq(local, selected->freq); + ieee80211_set_freq(dev, selected->freq); if (!(ifsta->flags & IEEE80211_STA_SSID_SET)) ieee80211_sta_set_ssid(dev, selected->ssid, selected->ssid_len); diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 1d7dd54aacef..c80d5899f279 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1132,7 +1132,7 @@ static int ieee80211_tx(struct net_device *dev, struct sk_buff *skb, ieee80211_tx_handler *handler; struct ieee80211_tx_data tx; ieee80211_tx_result res = TX_DROP, res_prepare; - int ret, i; + int ret, i, retries = 0; WARN_ON(__ieee80211_queue_pending(local, control->queue)); @@ -1216,6 +1216,13 @@ retry: if (!__ieee80211_queue_stopped(local, control->queue)) { clear_bit(IEEE80211_LINK_STATE_PENDING, &local->state[control->queue]); + retries++; + /* + * Driver bug, it's rejecting packets but + * not stopping queues. + */ + if (WARN_ON_ONCE(retries > 5)) + goto drop; goto retry; } memcpy(&store->control, control, @@ -1562,13 +1569,13 @@ int ieee80211_subif_start_xmit(struct sk_buff *skb, * be cloned. This could happen, e.g., with Linux bridge code passing * us broadcast frames. 
*/ - if (head_need > 0 || skb_header_cloned(skb)) { + if (head_need > 0 || skb_cloned(skb)) { #if 0 printk(KERN_DEBUG "%s: need to reallocate buffer for %d bytes " "of headroom\n", dev->name, head_need); #endif - if (skb_header_cloned(skb)) + if (skb_cloned(skb)) I802_DEBUG_INC(local->tx_expand_skb_head_cloned); else I802_DEBUG_INC(local->tx_expand_skb_head); diff --git a/net/mac80211/wext.c b/net/mac80211/wext.c index 8311bb24f9f3..6106cb79060c 100644 --- a/net/mac80211/wext.c +++ b/net/mac80211/wext.c @@ -290,14 +290,22 @@ static int ieee80211_ioctl_giwmode(struct net_device *dev, return 0; } -int ieee80211_set_freq(struct ieee80211_local *local, int freqMHz) +int ieee80211_set_freq(struct net_device *dev, int freqMHz) { int ret = -EINVAL; struct ieee80211_channel *chan; + struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); chan = ieee80211_get_channel(local->hw.wiphy, freqMHz); if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) { + if (sdata->vif.type == IEEE80211_IF_TYPE_IBSS && + chan->flags & IEEE80211_CHAN_NO_IBSS) { + printk(KERN_DEBUG "%s: IBSS not allowed on frequency " + "%d MHz\n", dev->name, chan->center_freq); + return ret; + } local->oper_channel = chan; if (local->sta_sw_scanning || local->sta_hw_scanning) @@ -315,7 +323,6 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, struct iw_request_info *info, struct iw_freq *freq, char *extra) { - struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type == IEEE80211_IF_TYPE_STA) @@ -329,14 +336,14 @@ static int ieee80211_ioctl_siwfreq(struct net_device *dev, IEEE80211_STA_AUTO_CHANNEL_SEL; return 0; } else - return ieee80211_set_freq(local, + return ieee80211_set_freq(dev, ieee80211_channel_to_frequency(freq->m)); } else { int i, div = 1000000; for (i = 0; i < freq->e; i++) div /= 10; if (div > 0) - return ieee80211_set_freq(local, freq->m / div); + return ieee80211_set_freq(dev, freq->m / div); else return -EINVAL; } @@ -489,7 +496,8 @@ static int ieee80211_ioctl_giwap(struct net_device *dev, sdata = IEEE80211_DEV_TO_SUB_IF(dev); if (sdata->vif.type == IEEE80211_IF_TYPE_STA || sdata->vif.type == IEEE80211_IF_TYPE_IBSS) { - if (sdata->u.sta.state == IEEE80211_ASSOCIATED) { + if (sdata->u.sta.state == IEEE80211_ASSOCIATED || + sdata->u.sta.state == IEEE80211_IBSS_JOINED) { ap_addr->sa_family = ARPHRD_ETHER; memcpy(&ap_addr->sa_data, sdata->u.sta.bssid, ETH_ALEN); return 0; diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c index dc1598b86004..635b996c8c35 100644 --- a/net/mac80211/wme.c +++ b/net/mac80211/wme.c @@ -673,7 +673,7 @@ int ieee80211_ht_agg_queue_add(struct ieee80211_local *local, #ifdef CONFIG_MAC80211_HT_DEBUG if (net_ratelimit()) printk(KERN_DEBUG "allocated aggregation queue" - " %d tid %d addr %s pool=0x%lX", + " %d tid %d addr %s pool=0x%lX\n", i, tid, print_mac(mac, sta->addr), q->qdisc_pool[0]); #endif /* CONFIG_MAC80211_HT_DEBUG */ diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index c4b1799da5d7..662c1ccfee26 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -196,8 +196,6 @@ destroy_conntrack(struct nf_conntrack *nfct) if (l4proto && l4proto->destroy) l4proto->destroy(ct); - nf_ct_ext_destroy(ct); - rcu_read_unlock(); spin_lock_bh(&nf_conntrack_lock); @@ -520,6 +518,7 @@ static void nf_conntrack_free_rcu(struct rcu_head *head) void 
nf_conntrack_free(struct nf_conn *ct) { + nf_ct_ext_destroy(ct); call_rcu(&ct->rcu, nf_conntrack_free_rcu); } EXPORT_SYMBOL_GPL(nf_conntrack_free); diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index bcc19fa4ed1e..8a3f8b34e466 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -59,12 +59,19 @@ nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp) if (!*ext) return NULL; + INIT_RCU_HEAD(&(*ext)->rcu); (*ext)->offset[id] = off; (*ext)->len = len; return (void *)(*ext) + off; } +static void __nf_ct_ext_free_rcu(struct rcu_head *head) +{ + struct nf_ct_ext *ext = container_of(head, struct nf_ct_ext, rcu); + kfree(ext); +} + void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) { struct nf_ct_ext *new; @@ -106,7 +113,7 @@ void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) (void *)ct->ext + ct->ext->offset[i]); rcu_read_unlock(); } - kfree(ct->ext); + call_rcu(&ct->ext->rcu, __nf_ct_ext_free_rcu); ct->ext = new; } diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c index 95da1a24aab7..2f83c158934d 100644 --- a/net/netfilter/nf_conntrack_h323_main.c +++ b/net/netfilter/nf_conntrack_h323_main.c @@ -619,6 +619,7 @@ static const struct nf_conntrack_expect_policy h245_exp_policy = { static struct nf_conntrack_helper nf_conntrack_helper_h245 __read_mostly = { .name = "H.245", .me = THIS_MODULE, + .tuple.src.l3num = AF_UNSPEC, .tuple.dst.protonum = IPPROTO_UDP, .help = h245_help, .expect_policy = &h245_exp_policy, @@ -1765,6 +1766,7 @@ static void __exit nf_conntrack_h323_fini(void) nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]); nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]); nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]); + nf_conntrack_helper_unregister(&nf_conntrack_helper_h245); kfree(h323_buffer); pr_debug("nf_ct_h323: fini\n"); } @@ -1777,28 +1779,34 @@ static int __init nf_conntrack_h323_init(void) h323_buffer = kmalloc(65536, GFP_KERNEL); if (!h323_buffer) return -ENOMEM; - ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[0]); + ret = nf_conntrack_helper_register(&nf_conntrack_helper_h245); if (ret < 0) goto err1; - ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[1]); + ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[0]); if (ret < 0) goto err2; - ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[0]); + ret = nf_conntrack_helper_register(&nf_conntrack_helper_q931[1]); if (ret < 0) goto err3; - ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]); + ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[0]); if (ret < 0) goto err4; + ret = nf_conntrack_helper_register(&nf_conntrack_helper_ras[1]); + if (ret < 0) + goto err5; pr_debug("nf_ct_h323: init success\n"); return 0; -err4: +err5: nf_conntrack_helper_unregister(&nf_conntrack_helper_ras[0]); -err3: +err4: nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[1]); -err2: +err3: nf_conntrack_helper_unregister(&nf_conntrack_helper_q931[0]); +err2: + nf_conntrack_helper_unregister(&nf_conntrack_helper_h245); err1: + kfree(h323_buffer); return ret; } diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index bc11d7092032..9fda6ee95a31 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c @@ -92,10 +92,6 @@ void nf_log_packet(int pf, vsnprintf(prefix, sizeof(prefix), fmt, args); va_end(args); logger->logfn(pf, hooknum, skb, in, out, 
loginfo, prefix); - } else if (net_ratelimit()) { - printk(KERN_WARNING "nf_log_packet: can\'t log since " - "no backend logging module loaded in! Please either " - "load one, or disable logging explicitly\n"); } rcu_read_unlock(); } diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index f5aa23c3e886..3e1191cecaf0 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -444,8 +444,11 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) if (ops->dumpit == NULL) return -EOPNOTSUPP; - return netlink_dump_start(genl_sock, skb, nlh, - ops->dumpit, ops->done); + genl_unlock(); + err = netlink_dump_start(genl_sock, skb, nlh, + ops->dumpit, ops->done); + genl_lock(); + return err; } if (ops->doit == NULL) @@ -603,9 +606,6 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) int chains_to_skip = cb->args[0]; int fams_to_skip = cb->args[1]; - if (chains_to_skip != 0) - genl_lock(); - for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { if (i < chains_to_skip) continue; @@ -623,9 +623,6 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb) } errout: - if (chains_to_skip != 0) - genl_unlock(); - cb->args[0] = i; cb->args[1] = n; @@ -770,7 +767,7 @@ static int __init genl_init(void) /* we'll bump the group number right afterwards */ genl_sock = netlink_kernel_create(&init_net, NETLINK_GENERIC, 0, - genl_rcv, NULL, THIS_MODULE); + genl_rcv, &genl_mutex, THIS_MODULE); if (genl_sock == NULL) panic("GENL: Cannot initialize generic netlink\n"); diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index 5bc1ed490180..6807c97985a5 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -28,6 +28,7 @@ * $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $ */ #include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> @@ -53,13 +54,17 @@ */ #define HTB_HSIZE 16 /* classid hash size */ -#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */ +static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */ #define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */ #if HTB_VER >> 16 != TC_HTB_PROTOVER #error "Mismatched sch_htb.c and pkt_sch.h" #endif +/* Module parameter and sysfs export */ +module_param (htb_hysteresis, int, 0640); +MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate"); + /* used internaly to keep status of single class */ enum htb_cmode { HTB_CANT_SEND, /* class can't send and can't borrow */ @@ -462,19 +467,21 @@ static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl) htb_remove_class_from_row(q, cl, mask); } -#if HTB_HYSTERESIS static inline long htb_lowater(const struct htb_class *cl) { - return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; + if (htb_hysteresis) + return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; + else + return 0; } static inline long htb_hiwater(const struct htb_class *cl) { - return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; + if (htb_hysteresis) + return cl->cmode == HTB_CAN_SEND ? 
-cl->buffer : 0; + else + return 0; } -#else -#define htb_lowater(cl) (0) -#define htb_hiwater(cl) (0) -#endif + /** * htb_class_mode - computes and returns current class mode diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 532634861db1..024c3ebd9661 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -474,6 +474,15 @@ static void sctp_association_destroy(struct sctp_association *asoc) void sctp_assoc_set_primary(struct sctp_association *asoc, struct sctp_transport *transport) { + int changeover = 0; + + /* it's a changeover only if we already have a primary path + * that we are changing + */ + if (asoc->peer.primary_path != NULL && + asoc->peer.primary_path != transport) + changeover = 1 ; + asoc->peer.primary_path = transport; /* Set a default msg_name for events. */ @@ -499,12 +508,12 @@ void sctp_assoc_set_primary(struct sctp_association *asoc, * double switch to the same destination address. */ if (transport->cacc.changeover_active) - transport->cacc.cycling_changeover = 1; + transport->cacc.cycling_changeover = changeover; /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that * a changeover has occurred. */ - transport->cacc.changeover_active = 1; + transport->cacc.changeover_active = changeover; /* 3) The sender MUST store the next TSN to be sent in * next_tsn_at_change. diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index b435a193c5df..9258dfe784ae 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -108,14 +108,23 @@ static __init int sctp_proc_init(void) } if (sctp_snmp_proc_init()) - goto out_nomem; + goto out_snmp_proc_init; if (sctp_eps_proc_init()) - goto out_nomem; + goto out_eps_proc_init; if (sctp_assocs_proc_init()) - goto out_nomem; + goto out_assocs_proc_init; return 0; +out_assocs_proc_init: + sctp_eps_proc_exit(); +out_eps_proc_init: + sctp_snmp_proc_exit(); +out_snmp_proc_init: + if (proc_net_sctp) { + proc_net_sctp = NULL; + remove_proc_entry("sctp", init_net.proc_net); + } out_nomem: return -ENOMEM; } diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e7e3baf7009e..0dbcde6758ea 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4401,7 +4401,9 @@ static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len, if (copy_from_user(&getaddrs, optval, len)) return -EFAULT; - if (getaddrs.addr_num <= 0) return -EINVAL; + if (getaddrs.addr_num <= 0 || + getaddrs.addr_num >= (INT_MAX / sizeof(union sctp_addr))) + return -EINVAL; /* * For UDP-style sockets, id specifies the association to query. 
* If the id field is set to the value '0' then the locally bound diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index e18cd3628db4..657835f227d3 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -169,6 +169,11 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk) return (unix_peer(osk) == NULL || unix_our_peer(sk, osk)); } +static inline int unix_recvq_full(struct sock const *sk) +{ + return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog; +} + static struct sock *unix_peer_get(struct sock *s) { struct sock *peer; @@ -482,6 +487,8 @@ static int unix_socketpair(struct socket *, struct socket *); static int unix_accept(struct socket *, struct socket *, int); static int unix_getname(struct socket *, struct sockaddr *, int *, int); static unsigned int unix_poll(struct file *, struct socket *, poll_table *); +static unsigned int unix_datagram_poll(struct file *, struct socket *, + poll_table *); static int unix_ioctl(struct socket *, unsigned int, unsigned long); static int unix_shutdown(struct socket *, int); static int unix_stream_sendmsg(struct kiocb *, struct socket *, @@ -527,7 +534,7 @@ static const struct proto_ops unix_dgram_ops = { .socketpair = unix_socketpair, .accept = sock_no_accept, .getname = unix_getname, - .poll = datagram_poll, + .poll = unix_datagram_poll, .ioctl = unix_ioctl, .listen = sock_no_listen, .shutdown = unix_shutdown, @@ -548,7 +555,7 @@ static const struct proto_ops unix_seqpacket_ops = { .socketpair = unix_socketpair, .accept = unix_accept, .getname = unix_getname, - .poll = datagram_poll, + .poll = unix_datagram_poll, .ioctl = unix_ioctl, .listen = unix_listen, .shutdown = unix_shutdown, @@ -983,8 +990,7 @@ static long unix_wait_for_peer(struct sock *other, long timeo) sched = !sock_flag(other, SOCK_DEAD) && !(other->sk_shutdown & RCV_SHUTDOWN) && - (skb_queue_len(&other->sk_receive_queue) > - other->sk_max_ack_backlog); + unix_recvq_full(other); unix_state_unlock(other); @@ -1058,8 +1064,7 @@ restart: if (other->sk_state != TCP_LISTEN) goto out_unlock; - if (skb_queue_len(&other->sk_receive_queue) > - other->sk_max_ack_backlog) { + if (unix_recvq_full(other)) { err = -EAGAIN; if (!timeo) goto out_unlock; @@ -1428,9 +1433,7 @@ restart: goto out_unlock; } - if (unix_peer(other) != sk && - (skb_queue_len(&other->sk_receive_queue) > - other->sk_max_ack_backlog)) { + if (unix_peer(other) != sk && unix_recvq_full(other)) { if (!timeo) { err = -EAGAIN; goto out_unlock; @@ -1991,6 +1994,64 @@ static unsigned int unix_poll(struct file * file, struct socket *sock, poll_tabl return mask; } +static unsigned int unix_datagram_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk, *peer; + unsigned int mask; + + poll_wait(file, sk->sk_sleep, wait); + + peer = unix_peer_get(sk); + if (peer) { + if (peer != sk) { + /* + * Writability of a connected socket additionally + * depends on the state of the receive queue of the + * peer. + */ + poll_wait(file, &unix_sk(peer)->peer_wait, wait); + } else { + sock_put(peer); + peer = NULL; + } + } + + mask = 0; + + /* exceptional events? */ + if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) + mask |= POLLERR; + if (sk->sk_shutdown & RCV_SHUTDOWN) + mask |= POLLRDHUP; + if (sk->sk_shutdown == SHUTDOWN_MASK) + mask |= POLLHUP; + + /* readable? 
*/ + if (!skb_queue_empty(&sk->sk_receive_queue) || + (sk->sk_shutdown & RCV_SHUTDOWN)) + mask |= POLLIN | POLLRDNORM; + + /* Connection-based need to check for termination and startup */ + if (sk->sk_type == SOCK_SEQPACKET) { + if (sk->sk_state == TCP_CLOSE) + mask |= POLLHUP; + /* connection hasn't started yet? */ + if (sk->sk_state == TCP_SYN_SENT) + return mask; + } + + /* writable? */ + if (unix_writable(sk) && !(peer && unix_recvq_full(peer))) + mask |= POLLOUT | POLLWRNORM | POLLWRBAND; + else + set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); + + if (peer) + sock_put(peer); + + return mask; +} #ifdef CONFIG_PROC_FS static struct sock *first_unix_socket(int *i) |
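The final af_unix.c hunks above give connected datagram sockets their own poll routine (unix_datagram_poll) that also consults the peer's receive queue before reporting writability. A minimal user-space sketch of the situation this targets, assuming the default net.unix.max_dgram_qlen limit on the receiver's queue; the abstract socket name and the program itself are illustrative, not taken from the patch:

/*
 * Illustration only: a connected AF_UNIX datagram client whose peer's
 * receive queue is full.  With unix_datagram_poll() wired into
 * unix_dgram_ops, poll() stops reporting POLLOUT in this state instead
 * of letting the caller busy-loop on send() returning EAGAIN.
 */
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un addr;
	struct pollfd pfd;
	char byte = 0;
	int srv, cli;

	srv = socket(AF_UNIX, SOCK_DGRAM, 0);
	cli = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (srv < 0 || cli < 0)
		return 1;

	memset(&addr, 0, sizeof(addr));
	addr.sun_family = AF_UNIX;
	/* Abstract-namespace address (Linux-specific), so no unlink() needed. */
	strcpy(addr.sun_path + 1, "poll-demo");

	if (bind(srv, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    connect(cli, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/*
	 * Nobody reads from srv, so its receive queue fills up after
	 * net.unix.max_dgram_qlen datagrams and send() fails with EAGAIN.
	 */
	while (send(cli, &byte, 1, MSG_DONTWAIT) == 1)
		;

	pfd.fd = cli;
	pfd.events = POLLOUT;
	/*
	 * With the patched poll routine the descriptor is not writable
	 * until the peer drains its queue; previously datagram_poll()
	 * still reported POLLOUT here.
	 */
	if (poll(&pfd, 1, 0) == 0)
		printf("peer receive queue full: socket not writable\n");

	close(cli);
	close(srv);
	return 0;
}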