author | Jeff Garzik <jgarzik@pobox.com> | 2005-10-13 21:22:27 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@pobox.com> | 2005-10-13 21:22:27 -0400 |
commit | 59aee3c2a1e69fe5062bd1facb72d6fcea3f3f8f (patch) | |
tree | b337d73229a69e399d4e4f7128b33ce734660e35 /net | |
parent | 0d69ae5fb7eb9ba3b54cf0ba4ef5ae591f31eef7 (diff) | |
parent | 046d20b73960b7a2474b6d5e920d54c3fd7c23fe (diff) | |
Merge branch 'master'
Diffstat (limited to 'net')
64 files changed, 580 insertions, 220 deletions
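A recurring change in the hunks below is converting allocation-flag parameters from `unsigned int __nocast` (and plain `int`) to the `gfp_t` typedef, so GFP flags are carried with a distinct type that sparse can check. As a rough illustration of that calling convention only (the struct and helper names here are invented, not taken from this patch):

```c
#include <linux/gfp.h>   /* gfp_t, GFP_KERNEL, GFP_ATOMIC */
#include <linux/slab.h>  /* kmalloc, kfree */

/* Hypothetical example structure, not from the patch. */
struct demo_entry {
	int value;
};

/*
 * Illustrative helper: the caller picks the allocation context by
 * passing GFP_KERNEL (may sleep) or GFP_ATOMIC (interrupt/softirq),
 * and the gfp_t parameter type lets sparse catch flag/integer mix-ups.
 */
static struct demo_entry *demo_entry_alloc(int value, gfp_t gfp)
{
	struct demo_entry *e = kmalloc(sizeof(*e), gfp);

	if (!e)
		return NULL;
	e->value = value;
	return e;
}
```

Callers in process context would pass GFP_KERNEL, while callers holding a spinlock or running from interrupt context would pass GFP_ATOMIC, mirroring the `sock_wmalloc()`/`sock_rmalloc()` and skbuff signatures converted in the diff below.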
diff --git a/net/atm/addr.c b/net/atm/addr.c index a30d0bf48063..3060fd0ba4b9 100644 --- a/net/atm/addr.c +++ b/net/atm/addr.c @@ -44,31 +44,43 @@ static void notify_sigd(struct atm_dev *dev) sigd_enq(NULL, as_itf_notify, NULL, &pvc, NULL); } -void atm_reset_addr(struct atm_dev *dev) +void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t atype) { unsigned long flags; struct atm_dev_addr *this, *p; + struct list_head *head; spin_lock_irqsave(&dev->lock, flags); - list_for_each_entry_safe(this, p, &dev->local, entry) { + if (atype == ATM_ADDR_LECS) + head = &dev->lecs; + else + head = &dev->local; + list_for_each_entry_safe(this, p, head, entry) { list_del(&this->entry); kfree(this); } spin_unlock_irqrestore(&dev->lock, flags); - notify_sigd(dev); + if (head == &dev->local) + notify_sigd(dev); } -int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr) +int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, + enum atm_addr_type_t atype) { unsigned long flags; struct atm_dev_addr *this; + struct list_head *head; int error; error = check_addr(addr); if (error) return error; spin_lock_irqsave(&dev->lock, flags); - list_for_each_entry(this, &dev->local, entry) { + if (atype == ATM_ADDR_LECS) + head = &dev->lecs; + else + head = &dev->local; + list_for_each_entry(this, head, entry) { if (identical(&this->addr, addr)) { spin_unlock_irqrestore(&dev->lock, flags); return -EEXIST; @@ -80,28 +92,36 @@ int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr) return -ENOMEM; } this->addr = *addr; - list_add(&this->entry, &dev->local); + list_add(&this->entry, head); spin_unlock_irqrestore(&dev->lock, flags); - notify_sigd(dev); + if (head == &dev->local) + notify_sigd(dev); return 0; } -int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr) +int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, + enum atm_addr_type_t atype) { unsigned long flags; struct atm_dev_addr *this; + struct list_head *head; int error; error = check_addr(addr); if (error) return error; spin_lock_irqsave(&dev->lock, flags); - list_for_each_entry(this, &dev->local, entry) { + if (atype == ATM_ADDR_LECS) + head = &dev->lecs; + else + head = &dev->local; + list_for_each_entry(this, head, entry) { if (identical(&this->addr, addr)) { list_del(&this->entry); spin_unlock_irqrestore(&dev->lock, flags); kfree(this); - notify_sigd(dev); + if (head == &dev->local) + notify_sigd(dev); return 0; } } @@ -110,22 +130,27 @@ int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr) } int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user * buf, - size_t size) + size_t size, enum atm_addr_type_t atype) { unsigned long flags; struct atm_dev_addr *this; + struct list_head *head; int total = 0, error; struct sockaddr_atmsvc *tmp_buf, *tmp_bufp; spin_lock_irqsave(&dev->lock, flags); - list_for_each_entry(this, &dev->local, entry) + if (atype == ATM_ADDR_LECS) + head = &dev->lecs; + else + head = &dev->local; + list_for_each_entry(this, head, entry) total += sizeof(struct sockaddr_atmsvc); tmp_buf = tmp_bufp = kmalloc(total, GFP_ATOMIC); if (!tmp_buf) { spin_unlock_irqrestore(&dev->lock, flags); return -ENOMEM; } - list_for_each_entry(this, &dev->local, entry) + list_for_each_entry(this, head, entry) memcpy(tmp_bufp++, &this->addr, sizeof(struct sockaddr_atmsvc)); spin_unlock_irqrestore(&dev->lock, flags); error = total > size ? 
-E2BIG : total; diff --git a/net/atm/addr.h b/net/atm/addr.h index 3099d21feeaa..f39433ad45da 100644 --- a/net/atm/addr.h +++ b/net/atm/addr.h @@ -9,10 +9,12 @@ #include <linux/atm.h> #include <linux/atmdev.h> - -void atm_reset_addr(struct atm_dev *dev); -int atm_add_addr(struct atm_dev *dev,struct sockaddr_atmsvc *addr); -int atm_del_addr(struct atm_dev *dev,struct sockaddr_atmsvc *addr); -int atm_get_addr(struct atm_dev *dev,struct sockaddr_atmsvc __user *buf,size_t size); +void atm_reset_addr(struct atm_dev *dev, enum atm_addr_type_t type); +int atm_add_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, + enum atm_addr_type_t type); +int atm_del_addr(struct atm_dev *dev, struct sockaddr_atmsvc *addr, + enum atm_addr_type_t type); +int atm_get_addr(struct atm_dev *dev, struct sockaddr_atmsvc __user *buf, + size_t size, enum atm_addr_type_t type); #endif diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c index b2113c3454ae..223c7ad5bd0f 100644 --- a/net/atm/atm_misc.c +++ b/net/atm/atm_misc.c @@ -25,7 +25,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize) struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size, - int gfp_flags) + gfp_t gfp_flags) { struct sock *sk = sk_atm(vcc); int guess = atm_guess_pdu2truesize(pdu_size); diff --git a/net/atm/br2684.c b/net/atm/br2684.c index 289956c4dd3e..72f3f7b8de80 100644 --- a/net/atm/br2684.c +++ b/net/atm/br2684.c @@ -220,7 +220,7 @@ static int br2684_start_xmit(struct sk_buff *skb, struct net_device *dev) /* netif_stop_queue(dev); */ dev_kfree_skb(skb); read_unlock(&devs_lock); - return -EUNATCH; + return 0; } if (!br2684_xmit_vcc(skb, brdev, brvcc)) { /* diff --git a/net/atm/resources.c b/net/atm/resources.c index a57a9268bd24..415d2615d475 100644 --- a/net/atm/resources.c +++ b/net/atm/resources.c @@ -40,6 +40,7 @@ static struct atm_dev *__alloc_atm_dev(const char *type) dev->link_rate = ATM_OC3_PCR; spin_lock_init(&dev->lock); INIT_LIST_HEAD(&dev->local); + INIT_LIST_HEAD(&dev->lecs); return dev; } @@ -320,10 +321,12 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg) error = -EPERM; goto done; } - atm_reset_addr(dev); + atm_reset_addr(dev, ATM_ADDR_LOCAL); break; case ATM_ADDADDR: case ATM_DELADDR: + case ATM_ADDLECSADDR: + case ATM_DELLECSADDR: if (!capable(CAP_NET_ADMIN)) { error = -EPERM; goto done; @@ -335,14 +338,21 @@ int atm_dev_ioctl(unsigned int cmd, void __user *arg) error = -EFAULT; goto done; } - if (cmd == ATM_ADDADDR) - error = atm_add_addr(dev, &addr); + if (cmd == ATM_ADDADDR || cmd == ATM_ADDLECSADDR) + error = atm_add_addr(dev, &addr, + (cmd == ATM_ADDADDR ? + ATM_ADDR_LOCAL : ATM_ADDR_LECS)); else - error = atm_del_addr(dev, &addr); + error = atm_del_addr(dev, &addr, + (cmd == ATM_DELADDR ? + ATM_ADDR_LOCAL : ATM_ADDR_LECS)); goto done; } case ATM_GETADDR: - error = atm_get_addr(dev, buf, len); + case ATM_GETLECSADDR: + error = atm_get_addr(dev, buf, len, + (cmd == ATM_GETADDR ? 
+ ATM_ADDR_LOCAL : ATM_ADDR_LECS)); if (error < 0) goto done; size = error; diff --git a/net/ax25/ax25_in.c b/net/ax25/ax25_in.c index 810c9c76c2e0..73cfc3411c46 100644 --- a/net/ax25/ax25_in.c +++ b/net/ax25/ax25_in.c @@ -123,7 +123,7 @@ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) } skb_pull(skb, 1); /* Remove PID */ - skb->h.raw = skb->data; + skb->mac.raw = skb->nh.raw; skb->nh.raw = skb->data; skb->dev = ax25->ax25_dev->dev; skb->pkt_type = PACKET_HOST; diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index d3d6bc547212..59b2dd36baa7 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -372,7 +372,7 @@ static struct proto l2cap_proto = { .obj_size = sizeof(struct l2cap_pinfo) }; -static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) +static struct sock *l2cap_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 173f46e8cdae..35adce6482b6 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -229,7 +229,7 @@ static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d) d->rx_credits = RFCOMM_DEFAULT_CREDITS; } -struct rfcomm_dlc *rfcomm_dlc_alloc(unsigned int __nocast prio) +struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) { struct rfcomm_dlc *d = kmalloc(sizeof(*d), prio); if (!d) diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index f49e7e938bfb..a2b30f0aedb7 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -284,7 +284,7 @@ static struct proto rfcomm_proto = { .obj_size = sizeof(struct rfcomm_pinfo) }; -static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) +static struct sock *rfcomm_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct rfcomm_dlc *d; struct sock *sk; diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 1bca860a6109..158a9c46d863 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -286,7 +286,7 @@ static inline void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *de skb->destructor = rfcomm_wfree; } -static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, unsigned int __nocast priority) +static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority) { if (atomic_read(&dev->wmem_alloc) < rfcomm_room(dev->dlc)) { struct sk_buff *skb = alloc_skb(size, priority); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index ce7ab7dfa0b2..997e42df115c 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -418,7 +418,7 @@ static struct proto sco_proto = { .obj_size = sizeof(struct sco_pinfo) }; -static struct sock *sco_sock_alloc(struct socket *sock, int proto, unsigned int __nocast prio) +static struct sock *sco_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index 91bb895375f4..defcf6a8607c 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -79,7 +79,6 @@ static void destroy_nbp(struct net_bridge_port *p) { struct net_device *dev = p->dev; - dev->br_port = NULL; p->br = NULL; p->dev = NULL; dev_put(dev); @@ -100,6 +99,7 @@ static void del_nbp(struct net_bridge_port *p) struct net_bridge *br = p->br; struct net_device *dev = p->dev; + dev->br_port = NULL; dev_set_promiscuity(dev, -1); spin_lock_bh(&br->lock); diff --git a/net/bridge/netfilter/ebtables.c 
b/net/bridge/netfilter/ebtables.c index c4540144f0f4..f8ffbf6e2333 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -26,6 +26,7 @@ #include <linux/spinlock.h> #include <asm/uaccess.h> #include <linux/smp.h> +#include <linux/cpumask.h> #include <net/sock.h> /* needed for logical [in,out]-dev filtering */ #include "../br_private.h" @@ -823,10 +824,11 @@ static int translate_table(struct ebt_replace *repl, /* this will get free'd in do_replace()/ebt_register_table() if an error occurs */ newinfo->chainstack = (struct ebt_chainstack **) - vmalloc(num_possible_cpus() * sizeof(struct ebt_chainstack)); + vmalloc((highest_possible_processor_id()+1) + * sizeof(struct ebt_chainstack)); if (!newinfo->chainstack) return -ENOMEM; - for (i = 0; i < num_possible_cpus(); i++) { + for_each_cpu(i) { newinfo->chainstack[i] = vmalloc(udc_cnt * sizeof(struct ebt_chainstack)); if (!newinfo->chainstack[i]) { @@ -895,9 +897,12 @@ static void get_counters(struct ebt_counter *oldcounters, /* counters of cpu 0 */ memcpy(counters, oldcounters, - sizeof(struct ebt_counter) * nentries); + sizeof(struct ebt_counter) * nentries); + /* add other counters to those of cpu 0 */ - for (cpu = 1; cpu < num_possible_cpus(); cpu++) { + for_each_cpu(cpu) { + if (cpu == 0) + continue; counter_base = COUNTER_BASE(oldcounters, nentries, cpu); for (i = 0; i < nentries; i++) { counters[i].pcnt += counter_base[i].pcnt; @@ -929,7 +934,8 @@ static int do_replace(void __user *user, unsigned int len) BUGPRINT("Entries_size never zero\n"); return -EINVAL; } - countersize = COUNTER_OFFSET(tmp.nentries) * num_possible_cpus(); + countersize = COUNTER_OFFSET(tmp.nentries) * + (highest_possible_processor_id()+1); newinfo = (struct ebt_table_info *) vmalloc(sizeof(struct ebt_table_info) + countersize); if (!newinfo) @@ -1022,7 +1028,7 @@ static int do_replace(void __user *user, unsigned int len) vfree(table->entries); if (table->chainstack) { - for (i = 0; i < num_possible_cpus(); i++) + for_each_cpu(i) vfree(table->chainstack[i]); vfree(table->chainstack); } @@ -1040,7 +1046,7 @@ free_counterstmp: vfree(counterstmp); /* can be initialized in translate_table() */ if (newinfo->chainstack) { - for (i = 0; i < num_possible_cpus(); i++) + for_each_cpu(i) vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } @@ -1132,7 +1138,8 @@ int ebt_register_table(struct ebt_table *table) return -EINVAL; } - countersize = COUNTER_OFFSET(table->table->nentries) * num_possible_cpus(); + countersize = COUNTER_OFFSET(table->table->nentries) * + (highest_possible_processor_id()+1); newinfo = (struct ebt_table_info *) vmalloc(sizeof(struct ebt_table_info) + countersize); ret = -ENOMEM; @@ -1186,7 +1193,7 @@ free_unlock: up(&ebt_mutex); free_chainstack: if (newinfo->chainstack) { - for (i = 0; i < num_possible_cpus(); i++) + for_each_cpu(i) vfree(newinfo->chainstack[i]); vfree(newinfo->chainstack); } @@ -1209,7 +1216,7 @@ void ebt_unregister_table(struct ebt_table *table) up(&ebt_mutex); vfree(table->private->entries); if (table->private->chainstack) { - for (i = 0; i < num_possible_cpus(); i++) + for_each_cpu(i) vfree(table->private->chainstack[i]); vfree(table->private->chainstack); } diff --git a/net/core/dev.c b/net/core/dev.c index 9066c874e273..a44eeef24edf 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1132,7 +1132,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb) #endif /* Keep head the same: replace data */ -int __skb_linearize(struct sk_buff *skb, unsigned int __nocast gfp_mask) 
+int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask) { unsigned int size; u8 *data; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0e9431b59fb2..af9b1516e21f 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -130,7 +130,7 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) * Buffers may only be allocated from interrupts using a @gfp_mask of * %GFP_ATOMIC. */ -struct sk_buff *__alloc_skb(unsigned int size, unsigned int __nocast gfp_mask, +struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask, int fclone) { struct sk_buff *skb; @@ -198,7 +198,7 @@ nodata: */ struct sk_buff *alloc_skb_from_cache(kmem_cache_t *cp, unsigned int size, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { struct sk_buff *skb; u8 *data; @@ -361,7 +361,7 @@ void __kfree_skb(struct sk_buff *skb) * %GFP_ATOMIC. */ -struct sk_buff *skb_clone(struct sk_buff *skb, unsigned int __nocast gfp_mask) +struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask) { struct sk_buff *n; @@ -500,7 +500,7 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old) * header is going to be modified. Use pskb_copy() instead. */ -struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_mask) +struct sk_buff *skb_copy(const struct sk_buff *skb, gfp_t gfp_mask) { int headerlen = skb->data - skb->head; /* @@ -539,7 +539,7 @@ struct sk_buff *skb_copy(const struct sk_buff *skb, unsigned int __nocast gfp_ma * The returned buffer has a reference count of 1. */ -struct sk_buff *pskb_copy(struct sk_buff *skb, unsigned int __nocast gfp_mask) +struct sk_buff *pskb_copy(struct sk_buff *skb, gfp_t gfp_mask) { /* * Allocate the copy buffer @@ -598,7 +598,7 @@ out: */ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { int i; u8 *data; @@ -689,7 +689,7 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom) */ struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom, int newtailroom, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { /* * Allocate the copy buffer diff --git a/net/core/sock.c b/net/core/sock.c index 928d2a1d6d8e..1c52fe809eda 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -637,7 +637,7 @@ lenout: * @prot: struct proto associated with this new sock instance * @zero_it: if we should zero the newly allocated sock */ -struct sock *sk_alloc(int family, unsigned int __nocast priority, +struct sock *sk_alloc(int family, gfp_t priority, struct proto *prot, int zero_it) { struct sock *sk = NULL; @@ -704,7 +704,7 @@ void sk_free(struct sock *sk) module_put(owner); } -struct sock *sk_clone(const struct sock *sk, const unsigned int __nocast priority) +struct sock *sk_clone(const struct sock *sk, const gfp_t priority) { struct sock *newsk = sk_alloc(sk->sk_family, priority, sk->sk_prot, 0); @@ -845,7 +845,7 @@ unsigned long sock_i_ino(struct sock *sk) * Allocate a skb from the socket's send buffer. */ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, - unsigned int __nocast priority) + gfp_t priority) { if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { struct sk_buff * skb = alloc_skb(size, priority); @@ -861,7 +861,7 @@ struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, * Allocate a skb from the socket's receive buffer. 
*/ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, - unsigned int __nocast priority) + gfp_t priority) { if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) { struct sk_buff *skb = alloc_skb(size, priority); @@ -876,7 +876,7 @@ struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force, /* * Allocate a memory block from the socket's option memory buffer. */ -void *sock_kmalloc(struct sock *sk, int size, unsigned int __nocast priority) +void *sock_kmalloc(struct sock *sk, int size, gfp_t priority) { if ((unsigned)size <= sysctl_optmem_max && atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) { diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c index 6530283eafca..c9a62cca22fc 100644 --- a/net/dccp/ackvec.c +++ b/net/dccp/ackvec.c @@ -91,7 +91,7 @@ int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb) } struct dccp_ackvec *dccp_ackvec_alloc(const unsigned int len, - const unsigned int __nocast priority) + const gfp_t priority) { struct dccp_ackvec *av = kmalloc(sizeof(*av) + len, priority); diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h index 8ca51c9191f7..d0fd6c60c574 100644 --- a/net/dccp/ackvec.h +++ b/net/dccp/ackvec.h @@ -74,7 +74,7 @@ struct sk_buff; #ifdef CONFIG_IP_DCCP_ACKVEC extern struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, - const unsigned int __nocast priority); + const gfp_t priority); extern void dccp_ackvec_free(struct dccp_ackvec *av); extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk, @@ -93,7 +93,7 @@ static inline int dccp_ackvec_pending(const struct dccp_ackvec *av) } #else /* CONFIG_IP_DCCP_ACKVEC */ static inline struct dccp_ackvec *dccp_ackvec_alloc(unsigned int len, - const unsigned int __nocast priority) + const gfp_t priority) { return NULL; } diff --git a/net/dccp/ccid.h b/net/dccp/ccid.h index 21e55142dcd3..c37eeeaf5c6e 100644 --- a/net/dccp/ccid.h +++ b/net/dccp/ccid.h @@ -110,14 +110,14 @@ static inline int ccid_hc_tx_init(struct ccid *ccid, struct sock *sk) static inline void ccid_hc_rx_exit(struct ccid *ccid, struct sock *sk) { - if (ccid->ccid_hc_rx_exit != NULL && + if (ccid != NULL && ccid->ccid_hc_rx_exit != NULL && dccp_sk(sk)->dccps_hc_rx_ccid_private != NULL) ccid->ccid_hc_rx_exit(sk); } static inline void ccid_hc_tx_exit(struct ccid *ccid, struct sock *sk) { - if (ccid->ccid_hc_tx_exit != NULL && + if (ccid != NULL && ccid->ccid_hc_tx_exit != NULL && dccp_sk(sk)->dccps_hc_tx_ccid_private != NULL) ccid->ccid_hc_tx_exit(sk); } diff --git a/net/dccp/ccids/lib/loss_interval.h b/net/dccp/ccids/lib/loss_interval.h index 13ad47ba1420..417d9d82df3e 100644 --- a/net/dccp/ccids/lib/loss_interval.h +++ b/net/dccp/ccids/lib/loss_interval.h @@ -36,7 +36,7 @@ struct dccp_li_hist_entry { static inline struct dccp_li_hist_entry * dccp_li_hist_entry_new(struct dccp_li_hist *hist, - const unsigned int __nocast prio) + const gfp_t prio) { return kmem_cache_alloc(hist->dccplih_slab, prio); } diff --git a/net/dccp/ccids/lib/packet_history.h b/net/dccp/ccids/lib/packet_history.h index b375ebdb7dcf..122e96737ff6 100644 --- a/net/dccp/ccids/lib/packet_history.h +++ b/net/dccp/ccids/lib/packet_history.h @@ -86,7 +86,7 @@ extern struct dccp_rx_hist_entry * static inline struct dccp_tx_hist_entry * dccp_tx_hist_entry_new(struct dccp_tx_hist *hist, - const unsigned int __nocast prio) + const gfp_t prio) { struct dccp_tx_hist_entry *entry = kmem_cache_alloc(hist->dccptxh_slab, prio); @@ -137,7 +137,7 @@ static inline struct dccp_rx_hist_entry * const struct sock 
*sk, const u32 ndp, const struct sk_buff *skb, - const unsigned int __nocast prio) + const gfp_t prio) { struct dccp_rx_hist_entry *entry = kmem_cache_alloc(hist->dccprxh_slab, prio); diff --git a/net/dccp/input.c b/net/dccp/input.c index 1b6b2cb12376..3454d5941900 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -375,6 +375,9 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, case DCCP_PKT_RESET: inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK); break; + case DCCP_PKT_DATA: + if (sk->sk_state == DCCP_RESPOND) + break; case DCCP_PKT_DATAACK: case DCCP_PKT_ACK: /* @@ -393,7 +396,8 @@ static int dccp_rcv_respond_partopen_state_process(struct sock *sk, dccp_sk(sk)->dccps_osr = DCCP_SKB_CB(skb)->dccpd_seq; dccp_set_state(sk, DCCP_OPEN); - if (dh->dccph_type == DCCP_PKT_DATAACK) { + if (dh->dccph_type == DCCP_PKT_DATAACK || + dh->dccph_type == DCCP_PKT_DATA) { dccp_rcv_established(sk, skb, dh, len); queued = 1; /* packet was queued (by dccp_rcv_established) */ diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index 348f36b529f7..1186dc44cdff 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -452,7 +452,7 @@ static struct proto dn_proto = { .obj_size = sizeof(struct dn_sock), }; -static struct sock *dn_alloc_sock(struct socket *sock, int gfp) +static struct sock *dn_alloc_sock(struct socket *sock, gfp_t gfp) { struct dn_scp *scp; struct sock *sk = sk_alloc(PF_DECnet, gfp, &dn_proto, 1); @@ -804,7 +804,7 @@ static int dn_auto_bind(struct socket *sock) return rv; } -static int dn_confirm_accept(struct sock *sk, long *timeo, int allocation) +static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation) { struct dn_scp *scp = DN_SK(sk); DEFINE_WAIT(wait); diff --git a/net/decnet/dn_nsp_out.c b/net/decnet/dn_nsp_out.c index 53633d352868..c96c767b1f74 100644 --- a/net/decnet/dn_nsp_out.c +++ b/net/decnet/dn_nsp_out.c @@ -117,7 +117,7 @@ try_again: * The eventual aim is for each socket to have a cached header size * for its outgoing packets, and to set hdr from this when sk != NULL. 
*/ -struct sk_buff *dn_alloc_skb(struct sock *sk, int size, int pri) +struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri) { struct sk_buff *skb; int hdr = 64; @@ -210,7 +210,8 @@ static void dn_nsp_rtt(struct sock *sk, long rtt) * * Returns: The number of times the packet has been sent previously */ -static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, int gfp) +static inline unsigned dn_nsp_clone_and_send(struct sk_buff *skb, + gfp_t gfp) { struct dn_skb_cb *cb = DN_SKB_CB(skb); struct sk_buff *skb2; @@ -350,7 +351,8 @@ static unsigned short *dn_nsp_mk_data_header(struct sock *sk, struct sk_buff *sk return ptr; } -void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, int gfp, int oth) +void dn_nsp_queue_xmit(struct sock *sk, struct sk_buff *skb, + gfp_t gfp, int oth) { struct dn_scp *scp = DN_SK(sk); struct dn_skb_cb *cb = DN_SKB_CB(skb); @@ -517,7 +519,7 @@ static int dn_nsp_retrans_conn_conf(struct sock *sk) return 0; } -void dn_send_conn_conf(struct sock *sk, int gfp) +void dn_send_conn_conf(struct sock *sk, gfp_t gfp) { struct dn_scp *scp = DN_SK(sk); struct sk_buff *skb = NULL; @@ -549,7 +551,8 @@ void dn_send_conn_conf(struct sock *sk, int gfp) static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, - unsigned short reason, int gfp, struct dst_entry *dst, + unsigned short reason, gfp_t gfp, + struct dst_entry *dst, int ddl, unsigned char *dd, __u16 rem, __u16 loc) { struct sk_buff *skb = NULL; @@ -591,7 +594,7 @@ static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg, void dn_nsp_send_disc(struct sock *sk, unsigned char msgflg, - unsigned short reason, int gfp) + unsigned short reason, gfp_t gfp) { struct dn_scp *scp = DN_SK(sk); int ddl = 0; @@ -612,7 +615,7 @@ void dn_nsp_return_disc(struct sk_buff *skb, unsigned char msgflg, { struct dn_skb_cb *cb = DN_SKB_CB(skb); int ddl = 0; - int gfp = GFP_ATOMIC; + gfp_t gfp = GFP_ATOMIC; dn_nsp_do_disc(NULL, msgflg, reason, gfp, skb->dst, ddl, NULL, cb->src_port, cb->dst_port); @@ -624,7 +627,7 @@ void dn_nsp_send_link(struct sock *sk, unsigned char lsflags, char fcval) struct dn_scp *scp = DN_SK(sk); struct sk_buff *skb; unsigned char *ptr; - int gfp = GFP_ATOMIC; + gfp_t gfp = GFP_ATOMIC; if ((skb = dn_alloc_skb(sk, DN_MAX_NSP_DATA_HEADER + 2, gfp)) == NULL) return; @@ -659,7 +662,7 @@ void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg) unsigned char menuver; struct dn_skb_cb *cb; unsigned char type = 1; - int allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC; + gfp_t allocation = (msgflg == NSP_CI) ? 
sk->sk_allocation : GFP_ATOMIC; struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation); if (!skb) diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index 4644338ac571..42c50619aa8e 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -187,7 +187,7 @@ void ieee80211_txb_free(struct ieee80211_txb *txb) } static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size, - unsigned int __nocast gfp_mask) + gfp_t gfp_mask) { struct ieee80211_txb *txb; int i; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 1b5a09d1b90b..1b18ce66e7b7 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -5,6 +5,7 @@ #include <net/esp.h> #include <asm/scatterlist.h> #include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/pfkeyv2.h> #include <linux/random.h> #include <net/icmp.h> @@ -42,10 +43,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) esp = x->data; alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; - blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3; - clen = (clen + 2 + blksize-1)&~(blksize-1); + blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); + clen = ALIGN(clen + 2, blksize); if (esp->conf.padlen) - clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1); + clen = ALIGN(clen, esp->conf.padlen); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) goto error; @@ -143,7 +144,7 @@ static int esp_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc struct ip_esp_hdr *esph; struct esp_data *esp = x->data; struct sk_buff *trailer; - int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm); + int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); int alen = esp->auth.icv_trunc_len; int elen = skb->len - sizeof(struct ip_esp_hdr) - esp->conf.ivlen - alen; int nfrags; @@ -304,16 +305,16 @@ static int esp_post_input(struct xfrm_state *x, struct xfrm_decap_state *decap, static u32 esp4_get_max_size(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; - u32 blksize = crypto_tfm_alg_blocksize(esp->conf.tfm); + u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); if (x->props.mode) { - mtu = (mtu + 2 + blksize-1)&~(blksize-1); + mtu = ALIGN(mtu + 2, blksize); } else { /* The worst case. 
*/ - mtu += 2 + blksize; + mtu = ALIGN(mtu + 2, 4) + blksize - 4; } if (esp->conf.padlen) - mtu = (mtu + esp->conf.padlen-1)&~(esp->conf.padlen-1); + mtu = ALIGN(mtu, esp->conf.padlen); return mtu + x->props.header_len + esp->auth.icv_trunc_len; } diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index fe3c6d3d0c91..94468a76c5b4 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -494,7 +494,7 @@ void inet_csk_reqsk_queue_prune(struct sock *parent, EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune); struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req, - const unsigned int __nocast priority) + const gfp_t priority) { struct sock *newsk = sk_clone(sk, priority); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index f9076ef3a1a8..a010e9a68811 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -111,6 +111,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat tw->tw_prot = sk->sk_prot_creator; atomic_set(&tw->tw_refcnt, 1); inet_twsk_dead_node_init(tw); + __module_get(tw->tw_prot->owner); } return tw; diff --git a/net/ipv4/ipvs/ip_vs_app.c b/net/ipv4/ipvs/ip_vs_app.c index 6e092dadb388..fc6f95aaa969 100644 --- a/net/ipv4/ipvs/ip_vs_app.c +++ b/net/ipv4/ipvs/ip_vs_app.c @@ -604,7 +604,7 @@ static struct file_operations ip_vs_app_fops = { /* * Replace a segment of data with a new segment */ -int ip_vs_skb_replace(struct sk_buff *skb, int pri, +int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri, char *o_buf, int o_len, char *n_buf, int n_len) { struct iphdr *iph; diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 2cd7e7d1ac90..7d917e4ce1d9 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -139,9 +139,10 @@ config IP_NF_AMANDA config IP_NF_PPTP tristate 'PPTP protocol support' + depends on IP_NF_CONNTRACK help This module adds support for PPTP (Point to Point Tunnelling - Protocol, RFC2637) conncection tracking and NAT. + Protocol, RFC2637) connection tracking and NAT. If you are running PPTP sessions over a stateful firewall or NAT box, you may want to enable this feature. @@ -498,9 +499,14 @@ config IP_NF_TARGET_LOG To compile it as a module, choose M here. If unsure, say N. config IP_NF_TARGET_ULOG - tristate "ULOG target support" + tristate "ULOG target support (OBSOLETE)" depends on IP_NF_IPTABLES ---help--- + + This option enables the old IPv4-only "ipt_ULOG" implementation + which has been obsoleted by the new "nfnetlink_log" code (see + CONFIG_NETFILTER_NETLINK_LOG). + This option adds a `ULOG' target, which allows you to create rules in any iptables table. 
The packet is passed to a userspace logging daemon using netlink multicast sockets; unlike the LOG target diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index fa1634256680..a7969286e6e7 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -716,8 +716,10 @@ static int translate_table(const char *name, } /* And one copy for every other CPU */ - for (i = 1; i < num_possible_cpus(); i++) { - memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, + for_each_cpu(i) { + if (i == 0) + continue; + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, newinfo->entries, SMP_ALIGN(newinfo->size)); } @@ -767,7 +769,7 @@ static void get_counters(const struct arpt_table_info *t, unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { + for_each_cpu(cpu) { i = 0; ARPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -885,7 +887,8 @@ static int do_replace(void __user *user, unsigned int len) return -ENOMEM; newinfo = vmalloc(sizeof(struct arpt_table_info) - + SMP_ALIGN(tmp.size) * num_possible_cpus()); + + SMP_ALIGN(tmp.size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; @@ -1158,7 +1161,8 @@ int arpt_register_table(struct arpt_table *table, = { 0, 0, 0, { 0 }, { 0 }, { } }; newinfo = vmalloc(sizeof(struct arpt_table_info) - + SMP_ALIGN(repl->size) * num_possible_cpus()); + + SMP_ALIGN(repl->size) * + (highest_possible_processor_id()+1)); if (!newinfo) { ret = -ENOMEM; return ret; diff --git a/net/ipv4/netfilter/ip_conntrack_core.c b/net/ipv4/netfilter/ip_conntrack_core.c index ea65dd3e517a..07a80b56e8dc 100644 --- a/net/ipv4/netfilter/ip_conntrack_core.c +++ b/net/ipv4/netfilter/ip_conntrack_core.c @@ -1119,7 +1119,7 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct, unsigned long extra_jiffies, int do_acct) { - int do_event = 0; + int event = 0; IP_NF_ASSERT(ct->timeout.data == (unsigned long)ct); IP_NF_ASSERT(skb); @@ -1129,13 +1129,13 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct, /* If not in hash table, timer will not be active yet */ if (!is_confirmed(ct)) { ct->timeout.expires = extra_jiffies; - do_event = 1; + event = IPCT_REFRESH; } else { /* Need del_timer for race avoidance (may already be dying). 
*/ if (del_timer(&ct->timeout)) { ct->timeout.expires = jiffies + extra_jiffies; add_timer(&ct->timeout); - do_event = 1; + event = IPCT_REFRESH; } } @@ -1144,14 +1144,17 @@ void __ip_ct_refresh_acct(struct ip_conntrack *ct, ct->counters[CTINFO2DIR(ctinfo)].packets++; ct->counters[CTINFO2DIR(ctinfo)].bytes += ntohs(skb->nh.iph->tot_len); + if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000) + || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000)) + event |= IPCT_COUNTER_FILLING; } #endif write_unlock_bh(&ip_conntrack_lock); /* must be unlocked when calling event cache */ - if (do_event) - ip_conntrack_event_cache(IPCT_REFRESH, skb); + if (event) + ip_conntrack_event_cache(event, skb); } #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c index b08a432efcf8..166e6069f121 100644 --- a/net/ipv4/netfilter/ip_conntrack_netlink.c +++ b/net/ipv4/netfilter/ip_conntrack_netlink.c @@ -177,11 +177,11 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack *ct, struct nfattr *nest_count = NFA_NEST(skb, type); u_int64_t tmp; - tmp = cpu_to_be64(ct->counters[dir].packets); - NFA_PUT(skb, CTA_COUNTERS_PACKETS, sizeof(u_int64_t), &tmp); + tmp = htonl(ct->counters[dir].packets); + NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp); - tmp = cpu_to_be64(ct->counters[dir].bytes); - NFA_PUT(skb, CTA_COUNTERS_BYTES, sizeof(u_int64_t), &tmp); + tmp = htonl(ct->counters[dir].bytes); + NFA_PUT(skb, CTA_COUNTERS32_BYTES, sizeof(u_int32_t), &tmp); NFA_NEST_END(skb, nest_count); @@ -833,7 +833,8 @@ out: static inline int ctnetlink_change_status(struct ip_conntrack *ct, struct nfattr *cda[]) { - unsigned long d, status = *(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1]); + unsigned long d; + unsigned status = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_STATUS-1])); d = ct->status ^ status; if (d & (IPS_EXPECTED|IPS_CONFIRMED|IPS_DYING)) @@ -948,6 +949,31 @@ ctnetlink_change_timeout(struct ip_conntrack *ct, struct nfattr *cda[]) return 0; } +static inline int +ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[]) +{ + struct nfattr *tb[CTA_PROTOINFO_MAX], *attr = cda[CTA_PROTOINFO-1]; + struct ip_conntrack_protocol *proto; + u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum; + int err = 0; + + if (nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr) < 0) + goto nfattr_failure; + + proto = ip_conntrack_proto_find_get(npt); + if (!proto) + return -EINVAL; + + if (proto->from_nfattr) + err = proto->from_nfattr(tb, ct); + ip_conntrack_proto_put(proto); + + return err; + +nfattr_failure: + return -ENOMEM; +} + static int ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[]) { @@ -973,6 +999,12 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[]) return err; } + if (cda[CTA_PROTOINFO-1]) { + err = ctnetlink_change_protoinfo(ct, cda); + if (err < 0) + return err; + } + DEBUGP("all done\n"); return 0; } @@ -1002,6 +1034,12 @@ ctnetlink_create_conntrack(struct nfattr *cda[], if (err < 0) goto err; + if (cda[CTA_PROTOINFO-1]) { + err = ctnetlink_change_protoinfo(ct, cda); + if (err < 0) + return err; + } + ct->helper = ip_conntrack_helper_find_get(rtuple); add_timer(&ct->timeout); diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c index 838d1d69b36e..98f0015dd255 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c @@ -296,8 
+296,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[], struct ip_conntrack_tuple *tuple) { if (!tb[CTA_PROTO_ICMP_TYPE-1] - || !tb[CTA_PROTO_ICMP_CODE-1] - || !tb[CTA_PROTO_ICMP_ID-1]) + || !tb[CTA_PROTO_ICMP_CODE-1]) return -1; tuple->dst.u.icmp.type = diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c index 121760d6cc50..d6701cafbcc2 100644 --- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c +++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c @@ -341,17 +341,43 @@ static int tcp_print_conntrack(struct seq_file *s, static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa, const struct ip_conntrack *ct) { + struct nfattr *nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP); + read_lock_bh(&tcp_lock); NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t), &ct->proto.tcp.state); read_unlock_bh(&tcp_lock); + NFA_NEST_END(skb, nest_parms); + return 0; nfattr_failure: read_unlock_bh(&tcp_lock); return -1; } + +static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct) +{ + struct nfattr *attr = cda[CTA_PROTOINFO_TCP-1]; + struct nfattr *tb[CTA_PROTOINFO_TCP_MAX]; + + if (nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr) < 0) + goto nfattr_failure; + + if (!tb[CTA_PROTOINFO_TCP_STATE-1]) + return -EINVAL; + + write_lock_bh(&tcp_lock); + ct->proto.tcp.state = + *(u_int8_t *)NFA_DATA(tb[CTA_PROTOINFO_TCP_STATE-1]); + write_unlock_bh(&tcp_lock); + + return 0; + +nfattr_failure: + return -1; +} #endif static unsigned int get_conntrack_index(const struct tcphdr *tcph) @@ -1123,6 +1149,7 @@ struct ip_conntrack_protocol ip_conntrack_protocol_tcp = #if defined(CONFIG_IP_NF_CONNTRACK_NETLINK) || \ defined(CONFIG_IP_NF_CONNTRACK_NETLINK_MODULE) .to_nfattr = tcp_to_nfattr, + .from_nfattr = nfattr_to_tcp, .tuple_to_nfattr = ip_ct_port_tuple_to_nfattr, .nfattr_to_tuple = ip_ct_port_nfattr_to_tuple, #endif diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index eef99a1b5de6..75c27e92f6ab 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -27,6 +27,7 @@ #include <asm/semaphore.h> #include <linux/proc_fs.h> #include <linux/err.h> +#include <linux/cpumask.h> #include <linux/netfilter_ipv4/ip_tables.h> @@ -921,8 +922,10 @@ translate_table(const char *name, } /* And one copy for every other CPU */ - for (i = 1; i < num_possible_cpus(); i++) { - memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, + for_each_cpu(i) { + if (i == 0) + continue; + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, newinfo->entries, SMP_ALIGN(newinfo->size)); } @@ -943,7 +946,7 @@ replace_table(struct ipt_table *table, struct ipt_entry *table_base; unsigned int i; - for (i = 0; i < num_possible_cpus(); i++) { + for_each_cpu(i) { table_base = (void *)newinfo->entries + TABLE_OFFSET(newinfo, i); @@ -990,7 +993,7 @@ get_counters(const struct ipt_table_info *t, unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { + for_each_cpu(cpu) { i = 0; IPT_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -1128,7 +1131,8 @@ do_replace(void __user *user, unsigned int len) return -ENOMEM; newinfo = vmalloc(sizeof(struct ipt_table_info) - + SMP_ALIGN(tmp.size) * num_possible_cpus()); + + SMP_ALIGN(tmp.size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; @@ -1458,7 +1462,8 @@ int ipt_register_table(struct ipt_table *table, const struct ipt_replace *repl) = { 0, 0, 0, { 0 }, { 0 }, { } }; newinfo = vmalloc(sizeof(struct ipt_table_info) - + 
SMP_ALIGN(repl->size) * num_possible_cpus()); + + SMP_ALIGN(repl->size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c index b940346de4e7..6d80e063c187 100644 --- a/net/ipv4/tcp_bic.c +++ b/net/ipv4/tcp_bic.c @@ -136,7 +136,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1)) /* slow start */ ca->cnt = (cwnd * (BICTCP_B-1)) - / cwnd-ca->last_max_cwnd; + / (cwnd - ca->last_max_cwnd); else /* linear increase */ ca->cnt = cwnd / max_increment; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index c5b911f9b662..7114031fdc70 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -435,7 +435,16 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss int nsize, old_factor; u16 flags; - BUG_ON(len >= skb->len); + if (unlikely(len >= skb->len)) { + if (net_ratelimit()) { + printk(KERN_DEBUG "TCP: seg_size=%u, mss=%u, seq=%u, " + "end_seq=%u, skb->len=%u.\n", len, mss_now, + TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, + skb->len); + WARN_ON(1); + } + return 0; + } nsize = skb_headlen(skb) - len; if (nsize < 0) @@ -1610,7 +1619,7 @@ void tcp_send_fin(struct sock *sk) * was unread data in the receive queue. This behavior is recommended * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM */ -void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority) +void tcp_send_active_reset(struct sock *sk, gfp_t priority) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 9b27460f0cc7..40d9a1935ab5 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -31,6 +31,7 @@ #include <net/esp.h> #include <asm/scatterlist.h> #include <linux/crypto.h> +#include <linux/kernel.h> #include <linux/pfkeyv2.h> #include <linux/random.h> #include <net/icmp.h> @@ -66,10 +67,10 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb) alen = esp->auth.icv_trunc_len; tfm = esp->conf.tfm; - blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3; - clen = (clen + 2 + blksize-1)&~(blksize-1); + blksize = ALIGN(crypto_tfm_alg_blocksize(tfm), 4); + clen = ALIGN(clen + 2, blksize); if (esp->conf.padlen) - clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1); + clen = ALIGN(clen, esp->conf.padlen); if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) { goto error; @@ -133,7 +134,7 @@ static int esp6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, stru struct ipv6_esp_hdr *esph; struct esp_data *esp = x->data; struct sk_buff *trailer; - int blksize = crypto_tfm_alg_blocksize(esp->conf.tfm); + int blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); int alen = esp->auth.icv_trunc_len; int elen = skb->len - sizeof(struct ipv6_esp_hdr) - esp->conf.ivlen - alen; @@ -235,16 +236,17 @@ out_nofree: static u32 esp6_get_max_size(struct xfrm_state *x, int mtu) { struct esp_data *esp = x->data; - u32 blksize = crypto_tfm_alg_blocksize(esp->conf.tfm); + u32 blksize = ALIGN(crypto_tfm_alg_blocksize(esp->conf.tfm), 4); if (x->props.mode) { - mtu = (mtu + 2 + blksize-1)&~(blksize-1); + mtu = ALIGN(mtu + 2, blksize); } else { /* The worst case. 
*/ - mtu += 2 + blksize; + u32 padsize = ((blksize - 1) & 7) + 1; + mtu = ALIGN(mtu + 2, padsize) + blksize - padsize; } if (esp->conf.padlen) - mtu = (mtu + esp->conf.padlen-1)&~(esp->conf.padlen-1); + mtu = ALIGN(mtu, esp->conf.padlen); return mtu + x->props.header_len + esp->auth.icv_full_len; } diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index 519899fb11d5..39a96c768102 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1393,7 +1393,7 @@ static void mld_sendpack(struct sk_buff *skb) static int grec_size(struct ifmcaddr6 *pmc, int type, int gdel, int sdel) { - return sizeof(struct mld2_grec) + 4*mld_scount(pmc,type,gdel,sdel); + return sizeof(struct mld2_grec) + 16 * mld_scount(pmc,type,gdel,sdel); } static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 555a31347eda..305d9ee6d7db 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1450,7 +1450,7 @@ void ndisc_send_redirect(struct sk_buff *skb, struct neighbour *neigh, static void pndisc_redo(struct sk_buff *skb) { - ndisc_rcv(skb); + ndisc_recv_ns(skb); kfree_skb(skb); } diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 2da514b16d95..b03e90649eb5 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -28,6 +28,7 @@ #include <asm/uaccess.h> #include <asm/semaphore.h> #include <linux/proc_fs.h> +#include <linux/cpumask.h> #include <linux/netfilter_ipv6/ip6_tables.h> @@ -950,8 +951,10 @@ translate_table(const char *name, } /* And one copy for every other CPU */ - for (i = 1; i < num_possible_cpus(); i++) { - memcpy(newinfo->entries + SMP_ALIGN(newinfo->size)*i, + for_each_cpu(i) { + if (i == 0) + continue; + memcpy(newinfo->entries + SMP_ALIGN(newinfo->size) * i, newinfo->entries, SMP_ALIGN(newinfo->size)); } @@ -973,6 +976,7 @@ replace_table(struct ip6t_table *table, unsigned int i; for (i = 0; i < num_possible_cpus(); i++) { + for_each_cpu(i) { table_base = (void *)newinfo->entries + TABLE_OFFSET(newinfo, i); @@ -1019,7 +1023,7 @@ get_counters(const struct ip6t_table_info *t, unsigned int cpu; unsigned int i; - for (cpu = 0; cpu < num_possible_cpus(); cpu++) { + for_each_cpu(cpu) { i = 0; IP6T_ENTRY_ITERATE(t->entries + TABLE_OFFSET(t, cpu), t->size, @@ -1153,7 +1157,8 @@ do_replace(void __user *user, unsigned int len) return -ENOMEM; newinfo = vmalloc(sizeof(struct ip6t_table_info) - + SMP_ALIGN(tmp.size) * num_possible_cpus()); + + SMP_ALIGN(tmp.size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; @@ -1467,7 +1472,8 @@ int ip6t_register_table(struct ip6t_table *table, = { 0, 0, 0, { 0 }, { 0 }, { } }; newinfo = vmalloc(sizeof(struct ip6t_table_info) - + SMP_ALIGN(repl->size) * num_possible_cpus()); + + SMP_ALIGN(repl->size) * + (highest_possible_processor_id()+1)); if (!newinfo) return -ENOMEM; diff --git a/net/key/af_key.c b/net/key/af_key.c index 4879743b945a..39031684b65c 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -185,7 +185,7 @@ static int pfkey_release(struct socket *sock) } static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, - int allocation, struct sock *sk) + gfp_t allocation, struct sock *sk) { int err = -ENOBUFS; @@ -217,7 +217,7 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2, #define BROADCAST_ONE 1 #define BROADCAST_REGISTERED 2 #define BROADCAST_PROMISC_ONLY 4 -static int pfkey_broadcast(struct sk_buff *skb, int allocation, +static int pfkey_broadcast(struct sk_buff *skb, 
gfp_t allocation, int broadcast_flags, struct sock *one_sk) { struct sock *sk; @@ -1416,7 +1416,8 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, return 0; } -static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, int allocation) +static struct sk_buff *compose_sadb_supported(struct sadb_msg *orig, + gfp_t allocation) { struct sk_buff *skb; struct sadb_msg *hdr; @@ -2153,6 +2154,7 @@ out: static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, void **ext_hdrs) { + unsigned int dir; int err; struct sadb_x_policy *pol; struct xfrm_policy *xp; @@ -2161,7 +2163,11 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h if ((pol = ext_hdrs[SADB_X_EXT_POLICY-1]) == NULL) return -EINVAL; - xp = xfrm_policy_byid(0, pol->sadb_x_policy_id, + dir = xfrm_policy_id2dir(pol->sadb_x_policy_id); + if (dir >= XFRM_POLICY_MAX) + return -EINVAL; + + xp = xfrm_policy_byid(dir, pol->sadb_x_policy_id, hdr->sadb_msg_type == SADB_X_SPDDELETE2); if (xp == NULL) return -ENOENT; @@ -2173,9 +2179,9 @@ static int pfkey_spdget(struct sock *sk, struct sk_buff *skb, struct sadb_msg *h if (hdr->sadb_msg_type == SADB_X_SPDDELETE2) { c.data.byid = 1; c.event = XFRM_MSG_DELPOLICY; - km_policy_notify(xp, pol->sadb_x_policy_dir-1, &c); + km_policy_notify(xp, dir, &c); } else { - err = key_pol_get_resp(sk, xp, hdr, pol->sadb_x_policy_dir-1); + err = key_pol_get_resp(sk, xp, hdr, dir); } xfrm_pol_put(xp); diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 042b24a8ca4c..c761c15da421 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -867,8 +867,7 @@ static void llc_sk_init(struct sock* sk) * Allocates a LLC sock and initializes it. Returns the new LLC sock * or %NULL if there's no memory available for one */ -struct sock *llc_sk_alloc(int family, unsigned int __nocast priority, - struct proto *prot) +struct sock *llc_sk_alloc(int family, gfp_t priority, struct proto *prot) { struct sock *sk = sk_alloc(family, priority, prot, 1); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 49a3900e3d32..4bc27a6334c1 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -133,7 +133,7 @@ int nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len) memset(tb, 0, sizeof(struct nfattr *) * maxattr); while (NFA_OK(nfa, len)) { - unsigned flavor = nfa->nfa_type; + unsigned flavor = NFA_TYPE(nfa); if (flavor && flavor <= maxattr) tb[flavor-1] = nfa; nfa = NFA_NEXT(nfa, len); @@ -177,7 +177,7 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys, int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len); while (NFA_OK(attr, attrlen)) { - unsigned flavor = attr->nfa_type; + unsigned flavor = NFA_TYPE(attr); if (flavor) { if (flavor > attr_count) return -EINVAL; @@ -195,7 +195,7 @@ nfnetlink_check_attributes(struct nfnetlink_subsystem *subsys, int nfnetlink_send(struct sk_buff *skb, u32 pid, unsigned group, int echo) { - int allocation = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL; + gfp_t allocation = in_interrupt() ? 
GFP_ATOMIC : GFP_KERNEL; int err = 0; NETLINK_CB(skb).dst_group = group; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index a64e1d5ce3ca..678c3f2c0d0b 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -758,7 +758,7 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb) } static inline struct sk_buff *netlink_trim(struct sk_buff *skb, - unsigned int __nocast allocation) + gfp_t allocation) { int delta; @@ -880,7 +880,7 @@ out: } int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, - u32 group, unsigned int __nocast allocation) + u32 group, gfp_t allocation) { struct netlink_broadcast_data info; struct hlist_node *node; diff --git a/net/netrom/nr_dev.c b/net/netrom/nr_dev.c index 4e66eef9a034..509afddae569 100644 --- a/net/netrom/nr_dev.c +++ b/net/netrom/nr_dev.c @@ -58,7 +58,7 @@ int nr_rx_ip(struct sk_buff *skb, struct net_device *dev) /* Spoof incoming device */ skb->dev = dev; - skb->h.raw = skb->data; + skb->mac.raw = skb->nh.raw; skb->nh.raw = skb->data; skb->pkt_type = PACKET_HOST; diff --git a/net/rxrpc/call.c b/net/rxrpc/call.c index 5cfd4cadee42..c4aeb7d40266 100644 --- a/net/rxrpc/call.c +++ b/net/rxrpc/call.c @@ -1923,7 +1923,7 @@ int rxrpc_call_write_data(struct rxrpc_call *call, size_t sioc, struct kvec *siov, u8 rxhdr_flags, - int alloc_flags, + gfp_t alloc_flags, int dup_data, size_t *size_sent) { diff --git a/net/rxrpc/connection.c b/net/rxrpc/connection.c index 61463c74f8cc..2ba14a75dbbe 100644 --- a/net/rxrpc/connection.c +++ b/net/rxrpc/connection.c @@ -522,7 +522,7 @@ int rxrpc_conn_newmsg(struct rxrpc_connection *conn, uint8_t type, int dcount, struct kvec diov[], - int alloc_flags, + gfp_t alloc_flags, struct rxrpc_message **_msg) { struct rxrpc_message *msg; diff --git a/net/sched/Kconfig b/net/sched/Kconfig index 45d3bc0812c8..81510da31792 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -72,9 +72,11 @@ config NET_SCH_CLK_GETTIMEOFDAY Choose this if you need a high resolution clock source but can't use the CPU's cycle counter. +# don't allow on SMP x86 because they can have unsynchronized TSCs. +# gettimeofday is a good alternative config NET_SCH_CLK_CPU bool "CPU cycle counter" - depends on X86_TSC || X86_64 || ALPHA || SPARC64 || PPC64 || IA64 + depends on ((X86_TSC || X86_64) && !SMP) || ALPHA || SPARC64 || PPC64 || IA64 help Say Y here if you want to use the CPU's cycle counter as clock source. This is a cheap and high resolution clock source, but on some diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 5b24ae0650d3..12b0f582a66b 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -71,7 +71,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_sock *sp; int i; @@ -273,7 +273,7 @@ fail_init: struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, const struct sock *sk, sctp_scope_t scope, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_association *asoc; @@ -479,7 +479,7 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc, /* Add a transport address to an association. 
*/ struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, const union sctp_addr *addr, - const unsigned int __nocast gfp, + const gfp_t gfp, const int peer_state) { struct sctp_transport *peer; @@ -1231,7 +1231,7 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) * local endpoint and the remote peer. */ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, - unsigned int __nocast gfp) + gfp_t gfp) { sctp_scope_t scope; int flags; @@ -1254,7 +1254,7 @@ int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, /* Build the association's bind address list from the cookie. */ int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, struct sctp_cookie *cookie, - unsigned int __nocast gfp) + gfp_t gfp) { int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); int var_size3 = cookie->raw_addr_list_len; diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c index f71549710f2e..2b962627f631 100644 --- a/net/sctp/bind_addr.c +++ b/net/sctp/bind_addr.c @@ -53,7 +53,7 @@ /* Forward declarations for internal helpers. */ static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *, - sctp_scope_t scope, unsigned int __nocast gfp, + sctp_scope_t scope, gfp_t gfp, int flags); static void sctp_bind_addr_clean(struct sctp_bind_addr *); @@ -64,7 +64,7 @@ static void sctp_bind_addr_clean(struct sctp_bind_addr *); */ int sctp_bind_addr_copy(struct sctp_bind_addr *dest, const struct sctp_bind_addr *src, - sctp_scope_t scope, unsigned int __nocast gfp, + sctp_scope_t scope, gfp_t gfp, int flags) { struct sctp_sockaddr_entry *addr; @@ -146,7 +146,7 @@ void sctp_bind_addr_free(struct sctp_bind_addr *bp) /* Add an address to the bind address list in the SCTP_bind_addr structure. */ int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_sockaddr_entry *addr; @@ -200,7 +200,7 @@ int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) */ union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, int *addrs_len, - unsigned int __nocast gfp) + gfp_t gfp) { union sctp_params addrparms; union sctp_params retval; @@ -252,7 +252,7 @@ end_raw: * address parameters). */ int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, - int addrs_len, __u16 port, unsigned int __nocast gfp) + int addrs_len, __u16 port, gfp_t gfp) { union sctp_addr_param *rawaddr; struct sctp_paramhdr *param; @@ -350,7 +350,7 @@ union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, /* Copy out addresses from the global local address list. */ static int sctp_copy_one_addr(struct sctp_bind_addr *dest, union sctp_addr *addr, - sctp_scope_t scope, unsigned int __nocast gfp, + sctp_scope_t scope, gfp_t gfp, int flags) { int error = 0; diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c index 61da2937e641..83ef411772f4 100644 --- a/net/sctp/chunk.c +++ b/net/sctp/chunk.c @@ -62,7 +62,7 @@ static void sctp_datamsg_init(struct sctp_datamsg *msg) } /* Allocate and initialize datamsg. 
*/ -SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(unsigned int __nocast gfp) +SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(gfp_t gfp) { struct sctp_datamsg *msg; msg = kmalloc(sizeof(struct sctp_datamsg), gfp); diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c index e22ccd655965..96984f7a2d69 100644 --- a/net/sctp/endpointola.c +++ b/net/sctp/endpointola.c @@ -68,7 +68,7 @@ static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); */ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, struct sock *sk, - unsigned int __nocast gfp) + gfp_t gfp) { struct sctp_sock *sp = sctp_sk(sk); memset(ep, 0, sizeof(struct sctp_endpoint)); @@ -138,8 +138,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, /* Create a sctp_endpoint with all that boring stuff initialized. * Returns NULL if there isn't enough memory. */ -struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, - unsigned int __nocast gfp) +struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, gfp_t gfp) { struct sctp_endpoint *ep; diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index f01d1c9002a1..26de4d3e1bd9 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -219,7 +219,7 @@ static void sctp_free_local_addr_list(void) /* Copy the local addresses which are valid for 'scope' into 'bp'. */ int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, - unsigned int __nocast gfp, int copy_flags) + gfp_t gfp, int copy_flags) { struct sctp_sockaddr_entry *addr; int error = 0; diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 3868a8d70cc0..10e82ec2ebd3 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -78,7 +78,7 @@ static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, - unsigned int __nocast gfp); + gfp_t gfp); /* What was the inbound interface for this chunk? */ int sctp_chunk_iif(const struct sctp_chunk *chunk) @@ -174,7 +174,7 @@ void sctp_init_cause(struct sctp_chunk *chunk, __u16 cause_code, */ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, const struct sctp_bind_addr *bp, - unsigned int __nocast gfp, int vparam_len) + gfp_t gfp, int vparam_len) { sctp_inithdr_t init; union sctp_params addrs; @@ -261,7 +261,7 @@ nodata: struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, const struct sctp_chunk *chunk, - unsigned int __nocast gfp, int unkparam_len) + gfp_t gfp, int unkparam_len) { sctp_inithdr_t initack; struct sctp_chunk *retval; @@ -1234,7 +1234,7 @@ void sctp_chunk_assign_tsn(struct sctp_chunk *chunk) /* Create a CLOSED association to use with an incoming packet. 
@@ -1234,7 +1234,7 @@ void sctp_chunk_assign_tsn(struct sctp_chunk *chunk)
 /* Create a CLOSED association to use with an incoming packet. */
 struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep,
					     struct sctp_chunk *chunk,
-					     unsigned int __nocast gfp)
+					     gfp_t gfp)
 {
	struct sctp_association *asoc;
	struct sk_buff *skb;
@@ -1349,7 +1349,7 @@ nodata:
 struct sctp_association *sctp_unpack_cookie(
	const struct sctp_endpoint *ep,
	const struct sctp_association *asoc,
-	struct sctp_chunk *chunk, unsigned int __nocast gfp,
+	struct sctp_chunk *chunk, gfp_t gfp,
	int *error, struct sctp_chunk **errp)
 {
	struct sctp_association *retval = NULL;
@@ -1814,7 +1814,7 @@ int sctp_verify_init(const struct sctp_association *asoc,
  */
 int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid,
		      const union sctp_addr *peer_addr,
-		      sctp_init_chunk_t *peer_init, unsigned int __nocast gfp)
+		      sctp_init_chunk_t *peer_init, gfp_t gfp)
 {
	union sctp_params param;
	struct sctp_transport *transport;
@@ -1985,7 +1985,7 @@ nomem:
 static int sctp_process_param(struct sctp_association *asoc,
			      union sctp_params param,
			      const union sctp_addr *peer_addr,
-			      unsigned int __nocast gfp)
+			      gfp_t gfp)
 {
	union sctp_addr addr;
	int i;
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index 39c970b5b198..f84173ea8ec1 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -63,7 +63,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
-				unsigned int __nocast gfp);
+				gfp_t gfp);
 static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     sctp_state_t state, struct sctp_endpoint *ep,
@@ -71,7 +71,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
-			     unsigned int __nocast gfp);
+			     gfp_t gfp);

 /********************************************************************
  * Helper functions
@@ -498,7 +498,7 @@ static int sctp_cmd_process_init(sctp_cmd_seq_t *commands,
				 struct sctp_association *asoc,
				 struct sctp_chunk *chunk,
				 sctp_init_chunk_t *peer_init,
-				 unsigned int __nocast gfp)
+				 gfp_t gfp)
 {
	int error;
@@ -853,7 +853,7 @@ int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype,
	       struct sctp_endpoint *ep,
	       struct sctp_association *asoc,
	       void *event_arg,
-	       unsigned int __nocast gfp)
+	       gfp_t gfp)
 {
	sctp_cmd_seq_t commands;
	const sctp_sm_table_entry_t *state_fn;
@@ -898,7 +898,7 @@ static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype,
			     void *event_arg,
			     sctp_disposition_t status,
			     sctp_cmd_seq_t *commands,
-			     unsigned int __nocast gfp)
+			     gfp_t gfp)
 {
	int error;
@@ -986,7 +986,7 @@ static int sctp_cmd_interpreter(sctp_event_t event_type,
				void *event_arg,
				sctp_disposition_t status,
				sctp_cmd_seq_t *commands,
-				unsigned int __nocast gfp)
+				gfp_t gfp)
 {
	int error = 0;
	int force;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 91ec8c936913..02e068d3450d 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -3159,8 +3159,9 @@ static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval
	return 0;
 }

-static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len,
-				char __user *optval, int __user *optlen)
+static int sctp_getsockopt_peer_addrs_num_old(struct sock *sk, int len,
+					      char __user *optval,
+					      int __user *optlen)
 {
	sctp_assoc_t id;
	struct sctp_association *asoc;
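The renamed *_old helpers in the socket.c hunks keep servicing the original SCTP_GET_PEER_ADDRS/SCTP_GET_LOCAL_ADDRS option layout, which embeds a user-space pointer in the option structure. Because that pointer is 4 bytes for a 32-bit process but 8 bytes in a 64-bit kernel, the structure sizes disagree and the strict `len != sizeof(...)` checks reject compat callers. Roughly, the two layouts look like the following; the field types are paraphrased from memory of the lksctp user API of this era, not quoted from this patch, so treat them as approximate:

/* Old layout: contains a user pointer, so its size differs between
 * 32-bit userspace and a 64-bit kernel (the compat breakage noted in
 * the comments added below).
 */
struct sctp_getaddrs_old {
	sctp_assoc_t		assoc_id;
	int			addr_num;
	struct sockaddr __user	*addrs;		/* pointer: 4 or 8 bytes */
};

/* New layout: a fixed-size header followed directly by packed sockaddrs
 * in the same getsockopt buffer - no embedded pointers, so one layout
 * serves 32-bit and 64-bit callers alike.
 */
struct sctp_getaddrs {
	sctp_assoc_t	assoc_id;
	__u32		addr_num;	/* filled in by the kernel */
	__u8		addrs[0];	/* packed struct sockaddr records */
};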
@@ -3185,23 +3186,28 @@ static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len,
	return cnt;
 }

-static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
-				char __user *optval, int __user *optlen)
+/*
+ * Old API for getting list of peer addresses. Does not work for 32-bit
+ * programs running on a 64-bit kernel
+ */
+static int sctp_getsockopt_peer_addrs_old(struct sock *sk, int len,
+					  char __user *optval,
+					  int __user *optlen)
 {
	struct sctp_association *asoc;
	struct list_head *pos;
	int cnt = 0;
-	struct sctp_getaddrs getaddrs;
+	struct sctp_getaddrs_old getaddrs;
	struct sctp_transport *from;
	void __user *to;
	union sctp_addr temp;
	struct sctp_sock *sp = sctp_sk(sk);
	int addrlen;

-	if (len != sizeof(struct sctp_getaddrs))
+	if (len != sizeof(struct sctp_getaddrs_old))
		return -EINVAL;

-	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
+	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old)))
		return -EFAULT;
	if (getaddrs.addr_num <= 0)
		return -EINVAL;
@@ -3225,15 +3231,69 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
		if (cnt >= getaddrs.addr_num) break;
	}
	getaddrs.addr_num = cnt;
-	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs)))
+	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
+				      char __user *optval, int __user *optlen)
+{
+	struct sctp_association *asoc;
+	struct list_head *pos;
+	int cnt = 0;
+	struct sctp_getaddrs getaddrs;
+	struct sctp_transport *from;
+	void __user *to;
+	union sctp_addr temp;
+	struct sctp_sock *sp = sctp_sk(sk);
+	int addrlen;
+	size_t space_left;
+	int bytes_copied;
+
+	if (len < sizeof(struct sctp_getaddrs))
+		return -EINVAL;
+
+	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
+		return -EFAULT;
+
+	/* For UDP-style sockets, id specifies the association to query. */
+	asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
+	if (!asoc)
+		return -EINVAL;
+
+	to = optval + offsetof(struct sctp_getaddrs,addrs);
+	space_left = len - sizeof(struct sctp_getaddrs) -
+			offsetof(struct sctp_getaddrs,addrs);
+
+	list_for_each(pos, &asoc->peer.transport_addr_list) {
+		from = list_entry(pos, struct sctp_transport, transports);
+		memcpy(&temp, &from->ipaddr, sizeof(temp));
+		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
+		addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len;
+		if(space_left < addrlen)
+			return -ENOMEM;
+		temp.v4.sin_port = htons(temp.v4.sin_port);
+		if (copy_to_user(to, &temp, addrlen))
+			return -EFAULT;
+		to += addrlen;
+		cnt++;
+		space_left -= addrlen;
+	}
+
+	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
+		return -EFAULT;
+	bytes_copied = ((char __user *)to) - optval;
+	if (put_user(bytes_copied, optlen))
		return -EFAULT;

	return 0;
 }

-static int sctp_getsockopt_local_addrs_num(struct sock *sk, int len,
-					   char __user *optval,
-					   int __user *optlen)
+static int sctp_getsockopt_local_addrs_num_old(struct sock *sk, int len,
+					       char __user *optval,
+					       int __user *optlen)
 {
	sctp_assoc_t id;
	struct sctp_bind_addr *bp;
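The replacement sctp_getsockopt_peer_addrs() above negotiates sizes differently from the old helper: the caller hands in one buffer whose first bytes are a struct sctp_getaddrs naming the association, the kernel packs the peer sockaddrs directly after that header, stores the count in addr_num, and reports the bytes actually written back through optlen. A hedged user-space sketch of that calling convention follows; the netinet/sctp.h definitions, the IPPROTO_SCTP level and the per-family walk are assumptions based on the kernel code above, not something this patch ships:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* assumed: sctp_assoc_t, struct sctp_getaddrs,
				 * SCTP_GET_PEER_ADDRS (lksctp-tools headers) */

/* Illustrative only: list the peer addresses of one association. */
static int dump_peer_addrs(int sd, sctp_assoc_t assoc_id)
{
	/* One buffer: the sctp_getaddrs header plus room for the packed
	 * sockaddrs the kernel will append after it.
	 */
	socklen_t len = sizeof(struct sctp_getaddrs) +
			64 * sizeof(struct sockaddr_in6);
	struct sctp_getaddrs *ga = calloc(1, len);
	if (!ga)
		return -1;
	ga->assoc_id = assoc_id;

	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_PEER_ADDRS, ga, &len) < 0) {
		free(ga);
		return -1;
	}

	/* The kernel wrote ga->addr_num sockaddrs back to back starting at
	 * the addrs member, and 'len' now holds the bytes it used.
	 */
	char *p = (char *)ga + offsetof(struct sctp_getaddrs, addrs);
	for (unsigned int i = 0; i < ga->addr_num; i++) {
		struct sockaddr *sa = (struct sockaddr *)p;
		printf("peer address %u: family %d\n", i, sa->sa_family);
		p += (sa->sa_family == AF_INET6) ? sizeof(struct sockaddr_in6)
						 : sizeof(struct sockaddr_in);
	}

	free(ga);
	return 0;
}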
@@ -3306,8 +3366,8 @@ done:
 /* Helper function that copies local addresses to user and returns the number
  * of addresses copied.
  */
-static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, int max_addrs,
-				    void __user *to)
+static int sctp_copy_laddrs_to_user_old(struct sock *sk, __u16 port, int max_addrs,
+					void __user *to)
 {
	struct list_head *pos;
	struct sctp_sockaddr_entry *addr;
@@ -3341,14 +3401,54 @@ static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, int max_addrs,
	return cnt;
 }

-static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
-				char __user *optval, int __user *optlen)
+static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port,
+				    void * __user *to, size_t space_left)
+{
+	struct list_head *pos;
+	struct sctp_sockaddr_entry *addr;
+	unsigned long flags;
+	union sctp_addr temp;
+	int cnt = 0;
+	int addrlen;
+
+	sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags);
+	list_for_each(pos, &sctp_local_addr_list) {
+		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+		if ((PF_INET == sk->sk_family) &&
+		    (AF_INET6 == addr->a.sa.sa_family))
+			continue;
+		memcpy(&temp, &addr->a, sizeof(temp));
+		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
+								&temp);
+		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+		if(space_left<addrlen)
+			return -ENOMEM;
+		temp.v4.sin_port = htons(port);
+		if (copy_to_user(*to, &temp, addrlen)) {
+			sctp_spin_unlock_irqrestore(&sctp_local_addr_lock,
+						    flags);
+			return -EFAULT;
+		}
+		*to += addrlen;
+		cnt ++;
+		space_left -= addrlen;
+	}
+	sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags);
+
+	return cnt;
+}
+
+/* Old API for getting list of local addresses. Does not work for 32-bit
+ * programs running on a 64-bit kernel
+ */
+static int sctp_getsockopt_local_addrs_old(struct sock *sk, int len,
+					   char __user *optval, int __user *optlen)
 {
	struct sctp_bind_addr *bp;
	struct sctp_association *asoc;
	struct list_head *pos;
	int cnt = 0;
-	struct sctp_getaddrs getaddrs;
+	struct sctp_getaddrs_old getaddrs;
	struct sctp_sockaddr_entry *addr;
	void __user *to;
	union sctp_addr temp;
@@ -3357,10 +3457,10 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
	rwlock_t *addr_lock;
	int err = 0;

-	if (len != sizeof(struct sctp_getaddrs))
+	if (len != sizeof(struct sctp_getaddrs_old))
		return -EINVAL;

-	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
+	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs_old)))
		return -EFAULT;
	if (getaddrs.addr_num <= 0)
		return -EINVAL;
@@ -3392,8 +3492,9 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
		addr = list_entry(bp->address_list.next,
				  struct sctp_sockaddr_entry, list);
		if (sctp_is_any(&addr->a)) {
-			cnt = sctp_copy_laddrs_to_user(sk, bp->port,
-						       getaddrs.addr_num, to);
+			cnt = sctp_copy_laddrs_to_user_old(sk, bp->port,
+							   getaddrs.addr_num,
+							   to);
			if (cnt < 0) {
				err = cnt;
				goto unlock;
@@ -3419,7 +3520,7 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,

copy_getaddrs:
	getaddrs.addr_num = cnt;
-	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs)))
+	if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs_old)))
		err = -EFAULT;

unlock:
@@ -3427,6 +3528,99 @@ unlock:
	return err;
 }

+static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
+				       char __user *optval, int __user *optlen)
+{
+	struct sctp_bind_addr *bp;
+	struct sctp_association *asoc;
+	struct list_head *pos;
+	int cnt = 0;
+	struct sctp_getaddrs getaddrs;
+	struct sctp_sockaddr_entry *addr;
+	void __user *to;
+	union sctp_addr temp;
+	struct sctp_sock *sp = sctp_sk(sk);
+	int addrlen;
+	rwlock_t *addr_lock;
+	int err = 0;
+	size_t space_left;
+	int bytes_copied;
+
+	if (len <= sizeof(struct sctp_getaddrs))
+		return -EINVAL;
+
+	if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs)))
+		return -EFAULT;
+
+	/*
+	 *  For UDP-style sockets, id specifies the association to query.
+	 *  If the id field is set to the value '0' then the locally bound
+	 *  addresses are returned without regard to any particular
+	 *  association.
+	 */
+	if (0 == getaddrs.assoc_id) {
+		bp = &sctp_sk(sk)->ep->base.bind_addr;
+		addr_lock = &sctp_sk(sk)->ep->base.addr_lock;
+	} else {
+		asoc = sctp_id2assoc(sk, getaddrs.assoc_id);
+		if (!asoc)
+			return -EINVAL;
+		bp = &asoc->base.bind_addr;
+		addr_lock = &asoc->base.addr_lock;
+	}
+
+	to = optval + offsetof(struct sctp_getaddrs,addrs);
+	space_left = len - sizeof(struct sctp_getaddrs) -
+			 offsetof(struct sctp_getaddrs,addrs);
+
+	sctp_read_lock(addr_lock);
+
+	/* If the endpoint is bound to 0.0.0.0 or ::0, get the valid
+	 * addresses from the global local address list.
+	 */
+	if (sctp_list_single_entry(&bp->address_list)) {
+		addr = list_entry(bp->address_list.next,
+				  struct sctp_sockaddr_entry, list);
+		if (sctp_is_any(&addr->a)) {
+			cnt = sctp_copy_laddrs_to_user(sk, bp->port,
+						       &to, space_left);
+			if (cnt < 0) {
+				err = cnt;
+				goto unlock;
+			}
+			goto copy_getaddrs;
+		}
+	}
+
+	list_for_each(pos, &bp->address_list) {
+		addr = list_entry(pos, struct sctp_sockaddr_entry, list);
+		memcpy(&temp, &addr->a, sizeof(temp));
+		sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
+		addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+		if(space_left < addrlen)
+			return -ENOMEM; /*fixme: right error?*/
+		temp.v4.sin_port = htons(temp.v4.sin_port);
+		if (copy_to_user(to, &temp, addrlen)) {
+			err = -EFAULT;
+			goto unlock;
+		}
+		to += addrlen;
+		cnt ++;
+		space_left -= addrlen;
+	}
+
+copy_getaddrs:
+	if (put_user(cnt, &((struct sctp_getaddrs __user *)optval)->addr_num))
+		return -EFAULT;
+	bytes_copied = ((char __user *)to) - optval;
+	if (put_user(bytes_copied, optlen))
+		return -EFAULT;
+
+unlock:
+	sctp_read_unlock(addr_lock);
+	return err;
+}
+
 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
  *
  * Requests that the local SCTP stack use the enclosed peer address as
@@ -3807,12 +4001,20 @@ SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname,
	case SCTP_INITMSG:
		retval = sctp_getsockopt_initmsg(sk, len, optval, optlen);
		break;
-	case SCTP_GET_PEER_ADDRS_NUM:
-		retval = sctp_getsockopt_peer_addrs_num(sk, len, optval,
+	case SCTP_GET_PEER_ADDRS_NUM_OLD:
+		retval = sctp_getsockopt_peer_addrs_num_old(sk, len, optval,
+							    optlen);
+		break;
+	case SCTP_GET_LOCAL_ADDRS_NUM_OLD:
+		retval = sctp_getsockopt_local_addrs_num_old(sk, len, optval,
+							     optlen);
+		break;
+	case SCTP_GET_PEER_ADDRS_OLD:
+		retval = sctp_getsockopt_peer_addrs_old(sk, len, optval,
							optlen);
		break;
-	case SCTP_GET_LOCAL_ADDRS_NUM:
-		retval = sctp_getsockopt_local_addrs_num(sk, len, optval,
+	case SCTP_GET_LOCAL_ADDRS_OLD:
+		retval = sctp_getsockopt_local_addrs_old(sk, len, optval,
							 optlen);
		break;
	case SCTP_GET_PEER_ADDRS:
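The new sctp_getsockopt_local_addrs() above follows the same buffer convention as the peer-address getter, with one extra rule spelled out in its comment: an assoc_id of 0 asks for the endpoint's locally bound addresses rather than those of a particular association, and a wildcard bind is expanded from the global local address list. A minimal sketch under the same header and option-name assumptions as the peer-address example:

/* Illustrative only: count the locally bound addresses of an SCTP socket.
 * assoc_id == 0 requests the endpoint's bound addresses; a non-zero id
 * scopes the query to that association, mirroring the kernel code above.
 */
static int count_local_addrs(int sd)
{
	socklen_t len = sizeof(struct sctp_getaddrs) +
			64 * sizeof(struct sockaddr_in6);
	struct sctp_getaddrs *ga = calloc(1, len);
	if (!ga)
		return -1;
	ga->assoc_id = 0;	/* 0: locally bound addresses, no association */

	if (getsockopt(sd, IPPROTO_SCTP, SCTP_GET_LOCAL_ADDRS, ga, &len) < 0) {
		free(ga);
		return -1;
	}

	int n = ga->addr_num;	/* 'len' now holds the bytes actually written */
	free(ga);
	return n;
}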
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c
index 25037daf3fa0..cbe2513d2822 100644
--- a/net/sctp/ssnmap.c
+++ b/net/sctp/ssnmap.c
@@ -58,7 +58,7 @@ static inline size_t sctp_ssnmap_size(__u16 in, __u16 out)
  * Allocate room to store at least 'len' contiguous TSNs.
  */
 struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out,
-				    unsigned int __nocast gfp)
+				    gfp_t gfp)
 {
	struct sctp_ssnmap *retval;
	int size;
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index d2f04ebe5081..6bc27200e6ca 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -57,7 +57,7 @@
 /* Initialize a new transport from provided memory. */
 static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
						  const union sctp_addr *addr,
-						  unsigned int __nocast gfp)
+						  gfp_t gfp)
 {
	/* Copy in the address. */
	peer->ipaddr = *addr;
@@ -122,7 +122,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
 /* Allocate and initialize a new transport. */
 struct sctp_transport *sctp_transport_new(const union sctp_addr *addr,
-					  unsigned int __nocast gfp)
+					  gfp_t gfp)
 {
	struct sctp_transport *transport;
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 0abd5101107c..057e7fac3af0 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -74,7 +74,7 @@ SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags)
 /* Create a new sctp_ulpevent.  */
 SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags,
-						    unsigned int __nocast gfp)
+						    gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sk_buff *skb;
@@ -136,7 +136,7 @@ static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event)
 struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
	const struct sctp_association *asoc,
	__u16 flags, __u16 state, __u16 error, __u16 outbound,
-	__u16 inbound, unsigned int __nocast gfp)
+	__u16 inbound, gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sctp_assoc_change *sac;
@@ -237,7 +237,7 @@ fail:
 struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
	const struct sctp_association *asoc,
	const struct sockaddr_storage *aaddr,
-	int flags, int state, int error, unsigned int __nocast gfp)
+	int flags, int state, int error, gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sctp_paddr_change *spc;
@@ -350,7 +350,7 @@ fail:
  */
 struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
	const struct sctp_association *asoc, struct sctp_chunk *chunk,
-	__u16 flags, unsigned int __nocast gfp)
+	__u16 flags, gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sctp_remote_error *sre;
@@ -448,7 +448,7 @@ fail:
  */
 struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
	const struct sctp_association *asoc, struct sctp_chunk *chunk,
-	__u16 flags, __u32 error, unsigned int __nocast gfp)
+	__u16 flags, __u32 error, gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sctp_send_failed *ssf;
@@ -557,7 +557,7 @@ fail:
  */
 struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
	const struct sctp_association *asoc,
-	__u16 flags, unsigned int __nocast gfp)
+	__u16 flags, gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sctp_shutdown_event *sse;
@@ -620,7 +620,7 @@ fail:
  * 5.3.1.6 SCTP_ADAPTION_INDICATION
  */
 struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication(
-	const struct sctp_association *asoc, unsigned int __nocast gfp)
+	const struct sctp_association *asoc, gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sctp_adaption_event *sai;
@@ -657,7 +657,7 @@ fail:
  */
 struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
						struct sctp_chunk *chunk,
-						unsigned int __nocast gfp)
+						gfp_t gfp)
 {
	struct sctp_ulpevent *event = NULL;
	struct sk_buff *skb;
@@ -719,7 +719,7 @@ fail:
  */
 struct sctp_ulpevent *sctp_ulpevent_make_pdapi(
	const struct sctp_association *asoc, __u32 indication,
-	unsigned int __nocast gfp)
+	gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sctp_pdapi_event *pd;
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c
index ec2c857eae7f..2080b2d28c98 100644
--- a/net/sctp/ulpqueue.c
+++ b/net/sctp/ulpqueue.c
@@ -100,7 +100,7 @@ void sctp_ulpq_free(struct sctp_ulpq *ulpq)
 /* Process an incoming DATA chunk. */
 int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
-			unsigned int __nocast gfp)
+			gfp_t gfp)
 {
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
@@ -792,7 +792,7 @@ static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 /* Partial deliver the first message as there is pressure on rwnd. */
 void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
-				unsigned int __nocast gfp)
+				gfp_t gfp)
 {
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
@@ -816,7 +816,7 @@ void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 /* Renege some packets to make room for an incoming chunk. */
 void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
-		      unsigned int __nocast gfp)
+		      gfp_t gfp)
 {
	struct sctp_association *asoc;
	__u16 needed, freed;
@@ -855,7 +855,7 @@ void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 /* Notify the application if an association is aborted and in
  * partial delivery mode. Send up any pending received messages.
  */
-void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, unsigned int __nocast gfp)
+void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
 {
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f3104035e35d..54e60a657500 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -719,7 +719,7 @@ static void rpc_async_schedule(void *arg)
 void *
 rpc_malloc(struct rpc_task *task, size_t size)
 {
-	int	gfp;
+	gfp_t	gfp;

	if (task->tk_flags & RPC_TASK_SWAPPER)
		gfp = GFP_ATOMIC;
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index fda737d77edc..cbb0ba34a600 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -163,7 +163,7 @@ static void xfrm_policy_timer(unsigned long data)
	if (xp->dead)
		goto out;

-	dir = xp->index & 7;
+	dir = xfrm_policy_id2dir(xp->index);

	if (xp->lft.hard_add_expires_seconds) {
		long tmo = xp->lft.hard_add_expires_seconds +
@@ -225,7 +225,7 @@ expired:
  * SPD calls.
  */

-struct xfrm_policy *xfrm_policy_alloc(int gfp)
+struct xfrm_policy *xfrm_policy_alloc(gfp_t gfp)
 {
	struct xfrm_policy *policy;
@@ -417,7 +417,7 @@ struct xfrm_policy *xfrm_policy_byid(int dir, u32 id, int delete)
	struct xfrm_policy *pol, **p;

	write_lock_bh(&xfrm_policy_lock);
-	for (p = &xfrm_policy_list[id & 7]; (pol=*p)!=NULL; p = &pol->next) {
+	for (p = &xfrm_policy_list[dir]; (pol=*p)!=NULL; p = &pol->next) {
		if (pol->index == id) {
			xfrm_pol_hold(pol);
			if (delete)
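The xfrm_policy.c hunks stop open-coding the index-to-direction mapping: the policy timer now derives the direction through xfrm_policy_id2dir() instead of masking the low bits by hand, and xfrm_policy_byid() walks the list for the direction its caller passed in rather than re-deriving it from the id. Judging only from the expression being replaced, the helper is essentially the following; this is a sketch of the presumed include/net/xfrm.h definition, not part of this diff:

/* Policy indices are allocated so that their low bits encode the
 * direction (in/out/forward, plus the per-socket variants), which is
 * what the open-coded "& 7" relied on.  Presumed helper:
 */
static inline int xfrm_policy_id2dir(u32 index)
{
	return index & 7;
}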