| author | Jiri Kosina <jkosina@suse.cz> | 2010-12-22 18:57:02 +0100 |
|---|---|---|
| committer | Jiri Kosina <jkosina@suse.cz> | 2010-12-22 18:57:02 +0100 |
| commit | 4b7bd364700d9ac8372eff48832062b936d0793b (patch) | |
| tree | 0dbf78c95456a0b02d07fcd473281f04a87e266d /net | |
| parent | c0d8768af260e2cbb4bf659ae6094a262c86b085 (diff) | |
| parent | 90a8a73c06cc32b609a880d48449d7083327e11a (diff) | |
Merge branch 'master' into for-next
Conflicts:
MAINTAINERS
arch/arm/mach-omap2/pm24xx.c
drivers/scsi/bfa/bfa_fcpim.c
Needed to update to apply fixes for which the old branch was too
outdated.
Diffstat (limited to 'net')
100 files changed, 759 insertions(+), 460 deletions(-)
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c
index 799c631f0fed..f7fa67c78766 100644
--- a/net/atm/atm_sysfs.c
+++ b/net/atm/atm_sysfs.c
@@ -143,12 +143,13 @@ static struct class atm_class = {
 .dev_uevent = atm_uevent,
 };

-int atm_register_sysfs(struct atm_dev *adev)
+int atm_register_sysfs(struct atm_dev *adev, struct device *parent)
 {
 struct device *cdev = &adev->class_dev;
 int i, j, err;

 cdev->class = &atm_class;
+ cdev->parent = parent;
 dev_set_drvdata(cdev, adev);

 dev_set_name(cdev, "%s%d", adev->type, adev->number);
diff --git a/net/atm/resources.c b/net/atm/resources.c
index d29e58261511..23f45ce6f351 100644
--- a/net/atm/resources.c
+++ b/net/atm/resources.c
@@ -74,8 +74,9 @@ struct atm_dev *atm_dev_lookup(int number)
 }
 EXPORT_SYMBOL(atm_dev_lookup);

-struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
- int number, unsigned long *flags)
+struct atm_dev *atm_dev_register(const char *type, struct device *parent,
+ const struct atmdev_ops *ops, int number,
+ unsigned long *flags)
 {
 struct atm_dev *dev, *inuse;

@@ -115,7 +116,7 @@ struct atm_dev *atm_dev_register(const char *type, const struct atmdev_ops *ops,
 goto out_fail;
 }

- if (atm_register_sysfs(dev) < 0) {
+ if (atm_register_sysfs(dev, parent) < 0) {
 pr_err("atm_register_sysfs failed for dev %s\n", type);
 atm_proc_dev_deregister(dev);
 goto out_fail;
diff --git a/net/atm/resources.h b/net/atm/resources.h
index 126fb1840dfb..521431e30507 100644
--- a/net/atm/resources.h
+++ b/net/atm/resources.h
@@ -42,6 +42,6 @@ static inline void atm_proc_dev_deregister(struct atm_dev *dev)

 #endif /* CONFIG_PROC_FS */

-int atm_register_sysfs(struct atm_dev *adev);
+int atm_register_sysfs(struct atm_dev *adev, struct device *parent);
 void atm_unregister_sysfs(struct atm_dev *adev);
 #endif
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 26eaebf4aaa9..bb86d2932394 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,6 +1392,7 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 ax25_cb *ax25;
 int err = 0;

+ memset(fsa, 0, sizeof(fsa));
 lock_sock(sk);
 ax25 = ax25_sk(sk);

@@ -1403,7 +1404,6 @@ static int ax25_getname(struct socket *sock, struct sockaddr *uaddr,
 fsa->fsa_ax25.sax25_family = AF_AX25;
 fsa->fsa_ax25.sax25_call = ax25->dest_addr;
- fsa->fsa_ax25.sax25_ndigis = 0;

 if (ax25->digipeat != NULL) {
 ndigi = ax25->digipeat->ndigi;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index bfef5bae0b3a..84093b0000b9 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1175,6 +1175,12 @@ static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff
 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
 sizeof(cp), &cp);
+ } else if (!ev->status && conn->out &&
+ conn->sec_level == BT_SECURITY_HIGH) {
+ struct hci_cp_auth_requested cp;
+ cp.handle = ev->handle;
+ hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
+ sizeof(cp), &cp);
 } else {
 conn->state = BT_CONNECTED;
 hci_proto_connect_cfm(conn, ev->status);
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
index 98fdfa1fbddd..86a91543172a 100644
--- a/net/bluetooth/hidp/Kconfig
+++ b/net/bluetooth/hidp/Kconfig
@@ -1,6 +1,6 @@
 config BT_HIDP
 tristate "HIDP protocol support"
- depends on BT && BT_L2CAP && INPUT
+ depends on BT && BT_L2CAP && INPUT && HID_SUPPORT
 select HID
 help
 HIDP (Human Interface Device Protocol) is a transport layer
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index daa7a988d9a6..cd8f6ea03841 100644 ---
a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c @@ -2421,11 +2421,11 @@ static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned break; case 2: - *val = __le16_to_cpu(*((__le16 *) opt->val)); + *val = get_unaligned_le16(opt->val); break; case 4: - *val = __le32_to_cpu(*((__le32 *) opt->val)); + *val = get_unaligned_le32(opt->val); break; default: @@ -2452,11 +2452,11 @@ static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val) break; case 2: - *((__le16 *) opt->val) = cpu_to_le16(val); + put_unaligned_le16(val, opt->val); break; case 4: - *((__le32 *) opt->val) = cpu_to_le32(val); + put_unaligned_le32(val, opt->val); break; default: diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c index 39a5d87e33b4..fa642aa652bd 100644 --- a/net/bluetooth/rfcomm/core.c +++ b/net/bluetooth/rfcomm/core.c @@ -79,7 +79,10 @@ static void rfcomm_make_uih(struct sk_buff *skb, u8 addr); static void rfcomm_process_connect(struct rfcomm_session *s); -static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err); +static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, + bdaddr_t *dst, + u8 sec_level, + int *err); static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); static void rfcomm_session_del(struct rfcomm_session *s); @@ -401,7 +404,7 @@ static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, s = rfcomm_session_get(src, dst); if (!s) { - s = rfcomm_session_create(src, dst, &err); + s = rfcomm_session_create(src, dst, d->sec_level, &err); if (!s) return err; } @@ -679,7 +682,10 @@ static void rfcomm_session_close(struct rfcomm_session *s, int err) rfcomm_session_put(s); } -static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst, int *err) +static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, + bdaddr_t *dst, + u8 sec_level, + int *err) { struct rfcomm_session *s = NULL; struct sockaddr_l2 addr; @@ -704,6 +710,7 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, bdaddr_t *dst sk = sock->sk; lock_sock(sk); l2cap_pi(sk)->imtu = l2cap_mtu; + l2cap_pi(sk)->sec_level = sec_level; if (l2cap_ertm) l2cap_pi(sk)->mode = L2CAP_MODE_ERTM; release_sock(sk); diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index d0927d1fdada..66b9e5c0523a 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -882,7 +882,7 @@ static int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 type) int lm = 0; if (type != SCO_LINK && type != ESCO_LINK) - return 0; + return -EINVAL; BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr)); @@ -908,7 +908,7 @@ static int sco_connect_cfm(struct hci_conn *hcon, __u8 status) BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status); if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) - return 0; + return -EINVAL; if (!status) { struct sco_conn *conn; @@ -927,7 +927,7 @@ static int sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) BT_DBG("hcon %p reason %d", hcon, reason); if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) - return 0; + return -EINVAL; sco_conn_del(hcon, bt_err(reason)); diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c index 76ae68303d3a..d522d8c1703e 100644 --- a/net/caif/caif_config_util.c +++ b/net/caif/caif_config_util.c @@ -16,11 +16,18 @@ int connect_req_to_link_param(struct cfcnfg *cnfg, { struct dev_info *dev_info; enum cfcnfg_phy_preference pref; + int res; + memset(l, 0, sizeof(*l)); - 
l->priority = s->priority; + /* In caif protocol low value is high priority */ + l->priority = CAIF_PRIO_MAX - s->priority + 1; - if (s->link_name[0] != '\0') - l->phyid = cfcnfg_get_named(cnfg, s->link_name); + if (s->ifindex != 0){ + res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex); + if (res < 0) + return res; + l->phyid = res; + } else { switch (s->link_selector) { case CAIF_LINK_HIGH_BANDW: diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c index b99369a055d1..a42a408306e4 100644 --- a/net/caif/caif_dev.c +++ b/net/caif/caif_dev.c @@ -307,6 +307,8 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what, case NETDEV_UNREGISTER: caifd = caif_get(dev); + if (caifd == NULL) + break; netdev_info(dev, "unregister\n"); atomic_set(&caifd->state, what); caif_device_destroy(dev); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 2eca2dd0000f..1bf0cf503796 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -716,8 +716,7 @@ static int setsockopt(struct socket *sock, { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); - int prio, linksel; - struct ifreq ifreq; + int linksel; if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED) return -ENOPROTOOPT; @@ -735,33 +734,6 @@ static int setsockopt(struct socket *sock, release_sock(&cf_sk->sk); return 0; - case SO_PRIORITY: - if (lvl != SOL_SOCKET) - goto bad_sol; - if (ol < sizeof(int)) - return -EINVAL; - if (copy_from_user(&prio, ov, sizeof(int))) - return -EINVAL; - lock_sock(&(cf_sk->sk)); - cf_sk->conn_req.priority = prio; - release_sock(&cf_sk->sk); - return 0; - - case SO_BINDTODEVICE: - if (lvl != SOL_SOCKET) - goto bad_sol; - if (ol < sizeof(struct ifreq)) - return -EINVAL; - if (copy_from_user(&ifreq, ov, sizeof(ifreq))) - return -EFAULT; - lock_sock(&(cf_sk->sk)); - strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name, - sizeof(cf_sk->conn_req.link_name)); - cf_sk->conn_req.link_name - [sizeof(cf_sk->conn_req.link_name)-1] = 0; - release_sock(&cf_sk->sk); - return 0; - case CAIFSO_REQ_PARAM: if (lvl != SOL_CAIF) goto bad_sol; @@ -880,6 +852,18 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, sock->state = SS_CONNECTING; sk->sk_state = CAIF_CONNECTING; + /* Check priority value comming from socket */ + /* if priority value is out of range it will be ajusted */ + if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) + cf_sk->conn_req.priority = CAIF_PRIO_MAX; + else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) + cf_sk->conn_req.priority = CAIF_PRIO_MIN; + else + cf_sk->conn_req.priority = cf_sk->sk.sk_priority; + + /*ifindex = id of the interface.*/ + cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; + dbfs_atomic_inc(&cnt.num_connect_req); cf_sk->layer.receive = caif_sktrecv_cb; err = caif_connect_client(&cf_sk->conn_req, @@ -905,6 +889,7 @@ static int caif_connect(struct socket *sock, struct sockaddr *uaddr, cf_sk->maxframe = mtu - (headroom + tailroom); if (cf_sk->maxframe < 1) { pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu); + err = -ENODEV; goto out; } @@ -1142,7 +1127,7 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, set_rx_flow_on(cf_sk); /* Set default options on configuration */ - cf_sk->conn_req.priority = CAIF_PRIO_NORMAL; + cf_sk->sk.sk_priority= CAIF_PRIO_NORMAL; cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; /* Increase the number of sockets created. 
*/ diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c index 41adafd18914..21ede141018a 100644 --- a/net/caif/cfcnfg.c +++ b/net/caif/cfcnfg.c @@ -173,18 +173,15 @@ static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg, return NULL; } -int cfcnfg_get_named(struct cfcnfg *cnfg, char *name) + +int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi) { int i; - - /* Try to match with specified name */ - for (i = 0; i < MAX_PHY_LAYERS; i++) { - if (cnfg->phy_layers[i].frm_layer != NULL - && strcmp(cnfg->phy_layers[i].phy_layer->name, - name) == 0) - return cnfg->phy_layers[i].frm_layer->id; - } - return 0; + for (i = 0; i < MAX_PHY_LAYERS; i++) + if (cnfg->phy_layers[i].frm_layer != NULL && + cnfg->phy_layers[i].ifindex == ifi) + return i; + return -ENODEV; } int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer) diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index 08f267a109aa..3cd8f978e309 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -361,11 +361,10 @@ void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer) struct cfctrl_request_info *p, *tmp; struct cfctrl *ctrl = container_obj(layr); spin_lock(&ctrl->info_list_lock); - pr_warn("enter\n"); list_for_each_entry_safe(p, tmp, &ctrl->list, list) { if (p->client_layer == adap_layer) { - pr_warn("cancel req :%d\n", p->sequence_no); + pr_debug("cancel req :%d\n", p->sequence_no); list_del(&p->list); kfree(p); } diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c index 496fda9ac66f..11a2af4c162a 100644 --- a/net/caif/cfdbgl.c +++ b/net/caif/cfdbgl.c @@ -12,6 +12,8 @@ #include <net/caif/cfsrvl.h> #include <net/caif/cfpkt.h> +#define container_obj(layr) ((struct cfsrvl *) layr) + static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt); @@ -38,5 +40,17 @@ static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt) static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt) { + struct cfsrvl *service = container_obj(layr); + struct caif_payload_info *info; + int ret; + + if (!cfsrvl_ready(service, &ret)) + return ret; + + /* Add info for MUX-layer to route the packet out */ + info = cfpkt_info(pkt); + info->channel_id = service->layer.id; + info->dev_info = &service->dev_info; + return layr->dn->transmit(layr->dn, pkt); } diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c index bde8481e8d25..e2fb5fa75795 100644 --- a/net/caif/cfrfml.c +++ b/net/caif/cfrfml.c @@ -193,7 +193,7 @@ out: static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt) { - caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size); + caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size); /* Add info for MUX-layer to route the packet out. */ cfpkt_info(pkt)->channel_id = rfml->serv.layer.id; diff --git a/net/can/bcm.c b/net/can/bcm.c index 08ffe9e4be20..6faa8256e10c 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c @@ -125,7 +125,7 @@ struct bcm_sock { struct list_head tx_ops; unsigned long dropped_usr_msgs; struct proc_dir_entry *bcm_proc_read; - char procname [9]; /* pointer printed in ASCII with \0 */ + char procname [20]; /* pointer printed in ASCII with \0 */ }; static inline struct bcm_sock *bcm_sk(const struct sock *sk) diff --git a/net/ceph/Makefile b/net/ceph/Makefile index aab1cabb8035..5f19415ec9c0 100644 --- a/net/ceph/Makefile +++ b/net/ceph/Makefile @@ -1,9 +1,6 @@ # # Makefile for CEPH filesystem. 
# - -ifneq ($(KERNELRELEASE),) - obj-$(CONFIG_CEPH_LIB) += libceph.o libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \ @@ -16,22 +13,3 @@ libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \ ceph_fs.o ceph_strings.o ceph_hash.o \ pagevec.o -else -#Otherwise we were called directly from the command -# line; invoke the kernel build system. - -KERNELDIR ?= /lib/modules/$(shell uname -r)/build -PWD := $(shell pwd) - -default: all - -all: - $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules - -modules_install: - $(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install - -clean: - $(MAKE) -C $(KERNELDIR) M=$(PWD) clean - -endif diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c index 53d8abfa25d5..bf3e6a13c215 100644 --- a/net/ceph/buffer.c +++ b/net/ceph/buffer.c @@ -19,7 +19,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp) if (b->vec.iov_base) { b->is_vmalloc = false; } else { - b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL); + b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL); if (!b->vec.iov_base) { kfree(b); return NULL; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 0e8157ee5d43..b6ff4a1519ab 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -97,11 +97,9 @@ struct workqueue_struct *ceph_msgr_wq; int ceph_msgr_init(void) { ceph_msgr_wq = create_workqueue("ceph-msgr"); - if (IS_ERR(ceph_msgr_wq)) { - int ret = PTR_ERR(ceph_msgr_wq); - pr_err("msgr_init failed to create workqueue: %d\n", ret); - ceph_msgr_wq = NULL; - return ret; + if (!ceph_msgr_wq) { + pr_err("msgr_init failed to create workqueue\n"); + return -ENOMEM; } return 0; } @@ -540,8 +538,7 @@ static void prepare_write_message(struct ceph_connection *con) /* initialize page iterator */ con->out_msg_pos.page = 0; if (m->pages) - con->out_msg_pos.page_pos = - le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK; + con->out_msg_pos.page_pos = m->page_alignment; else con->out_msg_pos.page_pos = 0; con->out_msg_pos.data_pos = 0; @@ -1491,7 +1488,7 @@ static int read_partial_message(struct ceph_connection *con) struct ceph_msg *m = con->in_msg; int ret; int to, left; - unsigned front_len, middle_len, data_len, data_off; + unsigned front_len, middle_len, data_len; int datacrc = con->msgr->nocrc; int skip; u64 seq; @@ -1527,19 +1524,17 @@ static int read_partial_message(struct ceph_connection *con) data_len = le32_to_cpu(con->in_hdr.data_len); if (data_len > CEPH_MSG_MAX_DATA_LEN) return -EIO; - data_off = le16_to_cpu(con->in_hdr.data_off); /* verify seq# */ seq = le64_to_cpu(con->in_hdr.seq); if ((s64)seq - (s64)con->in_seq < 1) { - pr_info("skipping %s%lld %s seq %lld, expected %lld\n", + pr_info("skipping %s%lld %s seq %lld expected %lld\n", ENTITY_NAME(con->peer_name), ceph_pr_addr(&con->peer_addr.in_addr), seq, con->in_seq + 1); con->in_base_pos = -front_len - middle_len - data_len - sizeof(m->footer); con->in_tag = CEPH_MSGR_TAG_READY; - con->in_seq++; return 0; } else if ((s64)seq - (s64)con->in_seq > 1) { pr_err("read_partial_message bad seq %lld expected %lld\n", @@ -1576,7 +1571,7 @@ static int read_partial_message(struct ceph_connection *con) con->in_msg_pos.page = 0; if (m->pages) - con->in_msg_pos.page_pos = data_off & ~PAGE_MASK; + con->in_msg_pos.page_pos = m->page_alignment; else con->in_msg_pos.page_pos = 0; con->in_msg_pos.data_pos = 0; @@ -2301,6 +2296,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) /* data */ m->nr_pages = 0; + m->page_alignment = 0; m->pages = NULL; m->pagelist 
= NULL; m->bio = NULL; @@ -2370,6 +2366,7 @@ static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con, type, front_len); return NULL; } + msg->page_alignment = le16_to_cpu(hdr->data_off); } memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 79391994b3ed..3e20a122ffa2 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -71,6 +71,7 @@ void ceph_calc_raw_layout(struct ceph_osd_client *osdc, op->extent.length = objlen; } req->r_num_pages = calc_pages_for(off, *plen); + req->r_page_alignment = off & ~PAGE_MASK; if (op->op == CEPH_OSD_OP_WRITE) op->payload_len = *plen; @@ -390,6 +391,8 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, req->r_request->hdr.data_len = cpu_to_le32(data_len); } + req->r_request->page_alignment = req->r_page_alignment; + BUG_ON(p > msg->front.iov_base + msg->front.iov_len); msg_size = p - msg->front.iov_base; msg->front.iov_len = msg_size; @@ -419,7 +422,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, u32 truncate_seq, u64 truncate_size, struct timespec *mtime, - bool use_mempool, int num_reply) + bool use_mempool, int num_reply, + int page_align) { struct ceph_osd_req_op ops[3]; struct ceph_osd_request *req; @@ -447,6 +451,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, calc_layout(osdc, vino, layout, off, plen, req, ops); req->r_file_layout = *layout; /* keep a copy */ + /* in case it differs from natural alignment that calc_layout + filled in for us */ + req->r_page_alignment = page_align; + ceph_osdc_build_request(req, off, plen, ops, snapc, mtime, @@ -1489,7 +1497,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct ceph_file_layout *layout, u64 off, u64 *plen, u32 truncate_seq, u64 truncate_size, - struct page **pages, int num_pages) + struct page **pages, int num_pages, int page_align) { struct ceph_osd_request *req; int rc = 0; @@ -1499,15 +1507,15 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, req = ceph_osdc_new_request(osdc, layout, vino, off, plen, CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, 0, truncate_seq, truncate_size, NULL, - false, 1); + false, 1, page_align); if (!req) return -ENOMEM; /* it may be a short read due to an object boundary */ req->r_pages = pages; - dout("readpages final extent is %llu~%llu (%d pages)\n", - off, *plen, req->r_num_pages); + dout("readpages final extent is %llu~%llu (%d pages align %d)\n", + off, *plen, req->r_num_pages, page_align); rc = ceph_osdc_start_request(osdc, req, false); if (!rc) @@ -1533,6 +1541,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, { struct ceph_osd_request *req; int rc = 0; + int page_align = off & ~PAGE_MASK; BUG_ON(vino.snap != CEPH_NOSNAP); req = ceph_osdc_new_request(osdc, layout, vino, off, &len, @@ -1541,7 +1550,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, CEPH_OSD_FLAG_WRITE, snapc, do_sync, truncate_seq, truncate_size, mtime, - nofail, 1); + nofail, 1, page_align); if (!req) return -ENOMEM; @@ -1638,8 +1647,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, m = ceph_msg_get(req->r_reply); if (data_len > 0) { - unsigned data_off = le16_to_cpu(hdr->data_off); - int want = calc_pages_for(data_off & ~PAGE_MASK, data_len); + int want = calc_pages_for(req->r_page_alignment, data_len); if (unlikely(req->r_num_pages < want)) { pr_warning("tid %lld reply %d > expected %d pages\n", @@ -1651,6 
+1659,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, } m->pages = req->r_pages; m->nr_pages = req->r_num_pages; + m->page_alignment = req->r_page_alignment; #ifdef CONFIG_BLOCK m->bio = req->r_bio; #endif diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 54caf0687155..1a040e64c69f 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c @@ -13,8 +13,7 @@ * build a vector of user pages */ struct page **ceph_get_direct_page_vector(const char __user *data, - int num_pages, - loff_t off, size_t len) + int num_pages, bool write_page) { struct page **pages; int rc; @@ -25,24 +24,27 @@ struct page **ceph_get_direct_page_vector(const char __user *data, down_read(¤t->mm->mmap_sem); rc = get_user_pages(current, current->mm, (unsigned long)data, - num_pages, 0, 0, pages, NULL); + num_pages, write_page, 0, pages, NULL); up_read(¤t->mm->mmap_sem); - if (rc < 0) + if (rc < num_pages) goto fail; return pages; fail: - kfree(pages); + ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); return ERR_PTR(rc); } EXPORT_SYMBOL(ceph_get_direct_page_vector); -void ceph_put_page_vector(struct page **pages, int num_pages) +void ceph_put_page_vector(struct page **pages, int num_pages, bool dirty) { int i; - for (i = 0; i < num_pages; i++) + for (i = 0; i < num_pages; i++) { + if (dirty) + set_page_dirty_lock(pages[i]); put_page(pages[i]); + } kfree(pages); } EXPORT_SYMBOL(ceph_put_page_vector); diff --git a/net/core/dev.c b/net/core/dev.c index 89204e8c0e14..126694116852 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2131,7 +2131,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev, } else { struct sock *sk = skb->sk; queue_index = sk_tx_queue_get(sk); - if (queue_index < 0) { + if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) { queue_index = 0; if (dev->real_num_tx_queues > 1) diff --git a/net/core/dst.c b/net/core/dst.c index 8abe628b79f1..b99c7c7ffce2 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -370,6 +370,7 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, static struct notifier_block dst_dev_notifier = { .notifier_call = dst_dev_event, + .priority = -10, /* must be called after other network notifiers */ }; void __init dst_init(void) diff --git a/net/core/filter.c b/net/core/filter.c index 7beaec36b541..ae21a0d3c4a2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -112,39 +112,41 @@ EXPORT_SYMBOL(sk_filter); */ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen) { - struct sock_filter *fentry; /* We walk down these */ void *ptr; u32 A = 0; /* Accumulator */ u32 X = 0; /* Index Register */ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */ + unsigned long memvalid = 0; u32 tmp; int k; int pc; + BUILD_BUG_ON(BPF_MEMWORDS > BITS_PER_LONG); /* * Process array of filter instructions. 
*/ for (pc = 0; pc < flen; pc++) { - fentry = &filter[pc]; + const struct sock_filter *fentry = &filter[pc]; + u32 f_k = fentry->k; switch (fentry->code) { case BPF_S_ALU_ADD_X: A += X; continue; case BPF_S_ALU_ADD_K: - A += fentry->k; + A += f_k; continue; case BPF_S_ALU_SUB_X: A -= X; continue; case BPF_S_ALU_SUB_K: - A -= fentry->k; + A -= f_k; continue; case BPF_S_ALU_MUL_X: A *= X; continue; case BPF_S_ALU_MUL_K: - A *= fentry->k; + A *= f_k; continue; case BPF_S_ALU_DIV_X: if (X == 0) @@ -152,49 +154,49 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int A /= X; continue; case BPF_S_ALU_DIV_K: - A /= fentry->k; + A /= f_k; continue; case BPF_S_ALU_AND_X: A &= X; continue; case BPF_S_ALU_AND_K: - A &= fentry->k; + A &= f_k; continue; case BPF_S_ALU_OR_X: A |= X; continue; case BPF_S_ALU_OR_K: - A |= fentry->k; + A |= f_k; continue; case BPF_S_ALU_LSH_X: A <<= X; continue; case BPF_S_ALU_LSH_K: - A <<= fentry->k; + A <<= f_k; continue; case BPF_S_ALU_RSH_X: A >>= X; continue; case BPF_S_ALU_RSH_K: - A >>= fentry->k; + A >>= f_k; continue; case BPF_S_ALU_NEG: A = -A; continue; case BPF_S_JMP_JA: - pc += fentry->k; + pc += f_k; continue; case BPF_S_JMP_JGT_K: - pc += (A > fentry->k) ? fentry->jt : fentry->jf; + pc += (A > f_k) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGE_K: - pc += (A >= fentry->k) ? fentry->jt : fentry->jf; + pc += (A >= f_k) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JEQ_K: - pc += (A == fentry->k) ? fentry->jt : fentry->jf; + pc += (A == f_k) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JSET_K: - pc += (A & fentry->k) ? fentry->jt : fentry->jf; + pc += (A & f_k) ? fentry->jt : fentry->jf; continue; case BPF_S_JMP_JGT_X: pc += (A > X) ? fentry->jt : fentry->jf; @@ -209,7 +211,7 @@ unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int pc += (A & X) ? fentry->jt : fentry->jf; continue; case BPF_S_LD_W_ABS: - k = fentry->k; + k = f_k; load_w: ptr = load_pointer(skb, k, 4, &tmp); if (ptr != NULL) { @@ -218,7 +220,7 @@ load_w: } break; case BPF_S_LD_H_ABS: - k = fentry->k; + k = f_k; load_h: ptr = load_pointer(skb, k, 2, &tmp); if (ptr != NULL) { @@ -227,7 +229,7 @@ load_h: } break; case BPF_S_LD_B_ABS: - k = fentry->k; + k = f_k; load_b: ptr = load_pointer(skb, k, 1, &tmp); if (ptr != NULL) { @@ -242,32 +244,34 @@ load_b: X = skb->len; continue; case BPF_S_LD_W_IND: - k = X + fentry->k; + k = X + f_k; goto load_w; case BPF_S_LD_H_IND: - k = X + fentry->k; + k = X + f_k; goto load_h; case BPF_S_LD_B_IND: - k = X + fentry->k; + k = X + f_k; goto load_b; case BPF_S_LDX_B_MSH: - ptr = load_pointer(skb, fentry->k, 1, &tmp); + ptr = load_pointer(skb, f_k, 1, &tmp); if (ptr != NULL) { X = (*(u8 *)ptr & 0xf) << 2; continue; } return 0; case BPF_S_LD_IMM: - A = fentry->k; + A = f_k; continue; case BPF_S_LDX_IMM: - X = fentry->k; + X = f_k; continue; case BPF_S_LD_MEM: - A = mem[fentry->k]; + A = (memvalid & (1UL << f_k)) ? + mem[f_k] : 0; continue; case BPF_S_LDX_MEM: - X = mem[fentry->k]; + X = (memvalid & (1UL << f_k)) ? 
+ mem[f_k] : 0; continue; case BPF_S_MISC_TAX: X = A; @@ -276,14 +280,16 @@ load_b: A = X; continue; case BPF_S_RET_K: - return fentry->k; + return f_k; case BPF_S_RET_A: return A; case BPF_S_ST: - mem[fentry->k] = A; + memvalid |= 1UL << f_k; + mem[f_k] = A; continue; case BPF_S_STX: - mem[fentry->k] = X; + memvalid |= 1UL << f_k; + mem[f_k] = X; continue; default: WARN_ON(1); @@ -583,23 +589,16 @@ int sk_chk_filter(struct sock_filter *filter, int flen) EXPORT_SYMBOL(sk_chk_filter); /** - * sk_filter_rcu_release: Release a socket filter by rcu_head + * sk_filter_release_rcu - Release a socket filter by rcu_head * @rcu: rcu_head that contains the sk_filter to free */ -static void sk_filter_rcu_release(struct rcu_head *rcu) +void sk_filter_release_rcu(struct rcu_head *rcu) { struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu); - sk_filter_release(fp); -} - -static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp) -{ - unsigned int size = sk_filter_len(fp); - - atomic_sub(size, &sk->sk_omem_alloc); - call_rcu_bh(&fp->rcu, sk_filter_rcu_release); + kfree(fp); } +EXPORT_SYMBOL(sk_filter_release_rcu); /** * sk_attach_filter - attach a socket filter @@ -643,7 +642,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk) rcu_assign_pointer(sk->sk_filter, fp); if (old_fp) - sk_filter_delayed_uncharge(sk, old_fp); + sk_filter_uncharge(sk, old_fp); return 0; } EXPORT_SYMBOL_GPL(sk_attach_filter); @@ -657,7 +656,7 @@ int sk_detach_filter(struct sock *sk) sock_owned_by_user(sk)); if (filter) { rcu_assign_pointer(sk->sk_filter, NULL); - sk_filter_delayed_uncharge(sk, filter); + sk_filter_uncharge(sk, filter); ret = 0; } return ret; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index a5ff5a89f376..7f902cad10f8 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -712,15 +712,21 @@ static void rx_queue_release(struct kobject *kobj) map = rcu_dereference_raw(queue->rps_map); - if (map) + if (map) { + RCU_INIT_POINTER(queue->rps_map, NULL); call_rcu(&map->rcu, rps_map_release); + } flow_table = rcu_dereference_raw(queue->rps_flow_table); - if (flow_table) + if (flow_table) { + RCU_INIT_POINTER(queue->rps_flow_table, NULL); call_rcu(&flow_table->rcu, rps_dev_flow_table_release); + } if (atomic_dec_and_test(&first->count)) kfree(first); + else + memset(kobj, 0, sizeof(*kobj)); } static struct kobj_type rx_queue_ktype = { diff --git a/net/core/pktgen.c b/net/core/pktgen.c index fbce4b05a53e..33bc3823ac6f 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -887,7 +887,7 @@ static ssize_t pktgen_if_write(struct file *file, i += len; if (debug) { - size_t copy = min(count, 1023); + size_t copy = min_t(size_t, count, 1023); char tb[copy + 1]; if (copy_from_user(tb, user_buffer, copy)) return -EFAULT; @@ -2612,8 +2612,8 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev, /* Update any of the values, used when we're incrementing various * fields. */ - queue_map = pkt_dev->cur_queue_map; mod_cur_headers(pkt_dev); + queue_map = pkt_dev->cur_queue_map; datalen = (odev->hard_header_len + 16) & ~0xf; @@ -2976,8 +2976,8 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, /* Update any of the values, used when we're incrementing various * fields. 
*/ - queue_map = pkt_dev->cur_queue_map; mod_cur_headers(pkt_dev); + queue_map = pkt_dev->cur_queue_map; skb = __netdev_alloc_skb(odev, pkt_dev->cur_pkt_size + 64 diff --git a/net/core/request_sock.c b/net/core/request_sock.c index 7552495aff7a..fceeb37d7161 100644 --- a/net/core/request_sock.c +++ b/net/core/request_sock.c @@ -45,9 +45,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue, nr_table_entries = roundup_pow_of_two(nr_table_entries + 1); lopt_size += nr_table_entries * sizeof(struct request_sock *); if (lopt_size > PAGE_SIZE) - lopt = __vmalloc(lopt_size, - GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, - PAGE_KERNEL); + lopt = vzalloc(lopt_size); else lopt = kzalloc(lopt_size, GFP_KERNEL); if (lopt == NULL) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 8121268ddbdd..841c287ef40a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -347,16 +347,17 @@ static size_t rtnl_link_get_size(const struct net_device *dev) if (!ops) return 0; - size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ - nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ + size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */ + nla_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */ if (ops->get_size) /* IFLA_INFO_DATA + nested data */ - size += nlmsg_total_size(sizeof(struct nlattr)) + + size += nla_total_size(sizeof(struct nlattr)) + ops->get_size(dev); if (ops->get_xstats_size) - size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */ + /* IFLA_INFO_XSTATS */ + size += nla_total_size(ops->get_xstats_size(dev)); return size; } diff --git a/net/core/sock.c b/net/core/sock.c index 3eed5424e659..fb6080111461 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1653,10 +1653,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind) { struct proto *prot = sk->sk_prot; int amt = sk_mem_pages(size); - int allocated; + long allocated; sk->sk_forward_alloc += amt * SK_MEM_QUANTUM; - allocated = atomic_add_return(amt, prot->memory_allocated); + allocated = atomic_long_add_return(amt, prot->memory_allocated); /* Under limit. */ if (allocated <= prot->sysctl_mem[0]) { @@ -1714,7 +1714,7 @@ suppress_allocation: /* Alas. Undo changes. */ sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM; - atomic_sub(amt, prot->memory_allocated); + atomic_long_sub(amt, prot->memory_allocated); return 0; } EXPORT_SYMBOL(__sk_mem_schedule); @@ -1727,12 +1727,12 @@ void __sk_mem_reclaim(struct sock *sk) { struct proto *prot = sk->sk_prot; - atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, + atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, prot->memory_allocated); sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1; if (prot->memory_pressure && *prot->memory_pressure && - (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0])) + (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0])) *prot->memory_pressure = 0; } EXPORT_SYMBOL(__sk_mem_reclaim); @@ -2452,12 +2452,12 @@ static char proto_method_implemented(const void *method) static void proto_seq_printf(struct seq_file *seq, struct proto *proto) { - seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s " + seq_printf(seq, "%-9s %4u %6d %6ld %-3s %6u %-3s %-10s " "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n", proto->name, proto->obj_size, sock_prot_inuse_get(seq_file_net(seq), proto), - proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1, + proto->memory_allocated != NULL ? 
atomic_long_read(proto->memory_allocated) : -1L, proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI", proto->max_header, proto->slab == NULL ? "no" : "yes", diff --git a/net/core/timestamping.c b/net/core/timestamping.c index 0ae6c22da85b..c19bb4ee405e 100644 --- a/net/core/timestamping.c +++ b/net/core/timestamping.c @@ -96,11 +96,13 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb) struct phy_device *phydev; unsigned int type; - skb_push(skb, ETH_HLEN); + if (skb_headroom(skb) < ETH_HLEN) + return false; + __skb_push(skb, ETH_HLEN); type = classify(skb); - skb_pull(skb, ETH_HLEN); + __skb_pull(skb, ETH_HLEN); switch (type) { case PTP_CLASS_V1_IPV4: diff --git a/net/dccp/input.c b/net/dccp/input.c index 265985370fa1..e424a09e83f6 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c @@ -239,7 +239,8 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb) dccp_update_gsr(sk, seqno); if (dh->dccph_type != DCCP_PKT_SYNC && - (ackno != DCCP_PKT_WITHOUT_ACK_SEQ)) + ackno != DCCP_PKT_WITHOUT_ACK_SEQ && + after48(ackno, dp->dccps_gar)) dp->dccps_gar = ackno; } else { unsigned long now = jiffies; diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c index d6b93d19790f..6f97268ed85f 100644 --- a/net/decnet/af_decnet.c +++ b/net/decnet/af_decnet.c @@ -155,7 +155,7 @@ static const struct proto_ops dn_proto_ops; static DEFINE_RWLOCK(dn_hash_lock); static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE]; static struct hlist_head dn_wild_sk; -static atomic_t decnet_memory_allocated; +static atomic_long_t decnet_memory_allocated; static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags); static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags); @@ -1556,6 +1556,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us if (r_len > sizeof(struct linkinfo_dn)) r_len = sizeof(struct linkinfo_dn); + memset(&link, 0, sizeof(link)); + switch(sock->state) { case SS_CONNECTING: link.idn_linkstate = LL_CONNECTING; diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c index be3eb8e23288..28f8b5e5f73b 100644 --- a/net/decnet/sysctl_net_decnet.c +++ b/net/decnet/sysctl_net_decnet.c @@ -38,7 +38,7 @@ int decnet_log_martians = 1; int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW; /* Reasonable defaults, I hope, based on tcp's defaults */ -int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; +long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 }; int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; @@ -324,7 +324,7 @@ static ctl_table dn_table[] = { .data = &sysctl_decnet_mem, .maxlen = sizeof(sysctl_decnet_mem), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_doulongvec_minmax }, { .procname = "decnet_rmem", diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c index f8c1ae4b41f0..15dcc1a586b4 100644 --- a/net/econet/af_econet.c +++ b/net/econet/af_econet.c @@ -31,6 +31,7 @@ #include <linux/skbuff.h> #include <linux/udp.h> #include <linux/slab.h> +#include <linux/vmalloc.h> #include <net/sock.h> #include <net/inet_common.h> #include <linux/stat.h> @@ -276,12 +277,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, #endif #ifdef CONFIG_ECONET_AUNUDP struct msghdr udpmsg; - struct iovec iov[msg->msg_iovlen+1]; + struct iovec iov[2]; struct aunhdr ah; struct 
sockaddr_in udpdest; __kernel_size_t size; - int i; mm_segment_t oldfs; + char *userbuf; #endif /* @@ -297,23 +298,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, mutex_lock(&econet_mutex); - if (saddr == NULL) { - struct econet_sock *eo = ec_sk(sk); - - addr.station = eo->station; - addr.net = eo->net; - port = eo->port; - cb = eo->cb; - } else { - if (msg->msg_namelen < sizeof(struct sockaddr_ec)) { - mutex_unlock(&econet_mutex); - return -EINVAL; - } - addr.station = saddr->addr.station; - addr.net = saddr->addr.net; - port = saddr->port; - cb = saddr->cb; - } + if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) { + mutex_unlock(&econet_mutex); + return -EINVAL; + } + addr.station = saddr->addr.station; + addr.net = saddr->addr.net; + port = saddr->port; + cb = saddr->cb; /* Look for a device with the right network number. */ dev = net2dev_map[addr.net]; @@ -328,17 +320,17 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, } } - if (len + 15 > dev->mtu) { - mutex_unlock(&econet_mutex); - return -EMSGSIZE; - } - if (dev->type == ARPHRD_ECONET) { /* Real hardware Econet. We're not worthy etc. */ #ifdef CONFIG_ECONET_NATIVE unsigned short proto = 0; int res; + if (len + 15 > dev->mtu) { + mutex_unlock(&econet_mutex); + return -EMSGSIZE; + } + dev_hold(dev); skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev), @@ -351,7 +343,6 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, eb = (struct ec_cb *)&skb->cb; - /* BUG: saddr may be NULL */ eb->cookie = saddr->cookie; eb->sec = *saddr; eb->sent = ec_tx_done; @@ -415,6 +406,11 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, return -ENETDOWN; /* No socket - can't send */ } + if (len > 32768) { + err = -E2BIG; + goto error; + } + /* Make up a UDP datagram and hand it off to some higher intellect. */ memset(&udpdest, 0, sizeof(udpdest)); @@ -446,36 +442,26 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, /* tack our header on the front of the iovec */ size = sizeof(struct aunhdr); - /* - * XXX: that is b0rken. We can't mix userland and kernel pointers - * in iovec, since on a lot of platforms copy_from_user() will - * *not* work with the kernel and userland ones at the same time, - * regardless of what we do with set_fs(). And we are talking about - * econet-over-ethernet here, so "it's only ARM anyway" doesn't - * apply. Any suggestions on fixing that code? -- AV - */ iov[0].iov_base = (void *)&ah; iov[0].iov_len = size; - for (i = 0; i < msg->msg_iovlen; i++) { - void __user *base = msg->msg_iov[i].iov_base; - size_t iov_len = msg->msg_iov[i].iov_len; - /* Check it now since we switch to KERNEL_DS later. 
*/ - if (!access_ok(VERIFY_READ, base, iov_len)) { - mutex_unlock(&econet_mutex); - return -EFAULT; - } - iov[i+1].iov_base = base; - iov[i+1].iov_len = iov_len; - size += iov_len; + + userbuf = vmalloc(len); + if (userbuf == NULL) { + err = -ENOMEM; + goto error; } + iov[1].iov_base = userbuf; + iov[1].iov_len = len; + err = memcpy_fromiovec(userbuf, msg->msg_iov, len); + if (err) + goto error_free_buf; + /* Get a skbuff (no data, just holds our cb information) */ if ((skb = sock_alloc_send_skb(sk, 0, msg->msg_flags & MSG_DONTWAIT, - &err)) == NULL) { - mutex_unlock(&econet_mutex); - return err; - } + &err)) == NULL) + goto error_free_buf; eb = (struct ec_cb *)&skb->cb; @@ -491,7 +477,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, udpmsg.msg_name = (void *)&udpdest; udpmsg.msg_namelen = sizeof(udpdest); udpmsg.msg_iov = &iov[0]; - udpmsg.msg_iovlen = msg->msg_iovlen + 1; + udpmsg.msg_iovlen = 2; udpmsg.msg_control = NULL; udpmsg.msg_controllen = 0; udpmsg.msg_flags=0; @@ -499,9 +485,13 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock, oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */ err = sock_sendmsg(udpsock, &udpmsg, size); set_fs(oldfs); + +error_free_buf: + vfree(userbuf); #else err = -EPROTOTYPE; #endif + error: mutex_unlock(&econet_mutex); return err; @@ -671,6 +661,11 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg) err = 0; switch (cmd) { case SIOCSIFADDR: + if (!capable(CAP_NET_ADMIN)) { + err = -EPERM; + break; + } + edev = dev->ec_ptr; if (edev == NULL) { /* Magic up a new one. */ @@ -856,9 +851,13 @@ static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len) { struct iphdr *ip = ip_hdr(skb); unsigned char stn = ntohl(ip->saddr) & 0xff; + struct dst_entry *dst = skb_dst(skb); + struct ec_device *edev = NULL; struct sock *sk = NULL; struct sk_buff *newskb; - struct ec_device *edev = skb->dev->ec_ptr; + + if (dst) + edev = dst->dev->ec_ptr; if (! edev) goto bad; diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h index a29edf2219c8..c079cc0ec651 100644 --- a/net/ipv4/fib_lookup.h +++ b/net/ipv4/fib_lookup.h @@ -47,11 +47,8 @@ extern int fib_detect_death(struct fib_info *fi, int order, static inline void fib_result_assign(struct fib_result *res, struct fib_info *fi) { - if (res->fi != NULL) - fib_info_put(res->fi); + /* we used to play games with refcounts, but we now use RCU */ res->fi = fi; - if (fi != NULL) - atomic_inc(&fi->fib_clntref); } #endif /* _FIB_LOOKUP_H */ diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 200eb538fbb3..0f280348e0fd 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -365,7 +365,7 @@ static struct tnode *tnode_alloc(size_t size) if (size <= PAGE_SIZE) return kzalloc(size, GFP_KERNEL); else - return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); + return vzalloc(size); } static void __tnode_vfree(struct work_struct *arg) diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c index 96bc7f9475a3..e5d1a44bcbdf 100644 --- a/net/ipv4/icmp.c +++ b/net/ipv4/icmp.c @@ -569,6 +569,9 @@ void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info) /* No need to clone since we're just using its address. 
*/ rt2 = rt; + if (!fl.nl_u.ip4_u.saddr) + fl.nl_u.ip4_u.saddr = rt->rt_src; + err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0); switch (err) { case 0: diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index c8877c6c7216..3c53c2d89e3b 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -2306,10 +2306,8 @@ void ip_mc_drop_socket(struct sock *sk) in_dev = inetdev_by_index(net, iml->multi.imr_ifindex); (void) ip_mc_leave_src(sk, iml, in_dev); - if (in_dev != NULL) { + if (in_dev != NULL) ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr); - in_dev_put(in_dev); - } /* decrease mem now to avoid the memleak warning */ atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); call_rcu(&iml->rcu, ip_mc_socklist_reclaim); diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index ba8042665849..2ada17129fce 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -490,9 +490,11 @@ static int inet_csk_diag_dump(struct sock *sk, { struct inet_diag_req *r = NLMSG_DATA(cb->nlh); - if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { struct inet_diag_entry entry; - struct rtattr *bc = (struct rtattr *)(r + 1); + const struct nlattr *bc = nlmsg_find_attr(cb->nlh, + sizeof(*r), + INET_DIAG_REQ_BYTECODE); struct inet_sock *inet = inet_sk(sk); entry.family = sk->sk_family; @@ -512,7 +514,7 @@ static int inet_csk_diag_dump(struct sock *sk, entry.dport = ntohs(inet->inet_dport); entry.userlocks = sk->sk_userlocks; - if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) + if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry)) return 0; } @@ -527,9 +529,11 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, { struct inet_diag_req *r = NLMSG_DATA(cb->nlh); - if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { struct inet_diag_entry entry; - struct rtattr *bc = (struct rtattr *)(r + 1); + const struct nlattr *bc = nlmsg_find_attr(cb->nlh, + sizeof(*r), + INET_DIAG_REQ_BYTECODE); entry.family = tw->tw_family; #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) @@ -548,7 +552,7 @@ static int inet_twsk_diag_dump(struct inet_timewait_sock *tw, entry.dport = ntohs(tw->tw_dport); entry.userlocks = 0; - if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry)) + if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry)) return 0; } @@ -618,7 +622,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, struct inet_diag_req *r = NLMSG_DATA(cb->nlh); struct inet_connection_sock *icsk = inet_csk(sk); struct listen_sock *lopt; - struct rtattr *bc = NULL; + const struct nlattr *bc = NULL; struct inet_sock *inet = inet_sk(sk); int j, s_j; int reqnum, s_reqnum; @@ -638,8 +642,9 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, if (!lopt || !lopt->qlen) goto out; - if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) { - bc = (struct rtattr *)(r + 1); + if (nlmsg_attrlen(cb->nlh, sizeof(*r))) { + bc = nlmsg_find_attr(cb->nlh, sizeof(*r), + INET_DIAG_REQ_BYTECODE); entry.sport = inet->inet_num; entry.userlocks = sk->sk_userlocks; } @@ -672,8 +677,8 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk, &ireq->rmt_addr; entry.dport = ntohs(ireq->rmt_port); - if (!inet_diag_bc_run(RTA_DATA(bc), - RTA_PAYLOAD(bc), &entry)) + if (!inet_diag_bc_run(nla_data(bc), + nla_len(bc), &entry)) continue; } diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index 1b344f30b463..3c0369a3a663 100644 --- a/net/ipv4/inet_hashtables.c 
+++ b/net/ipv4/inet_hashtables.c @@ -133,8 +133,7 @@ int __inet_inherit_port(struct sock *sk, struct sock *child) } } } - sk_add_bind_node(child, &tb->owners); - inet_csk(child)->icsk_bind_hash = tb; + inet_bind_hash(child, tb, port); spin_unlock(&head->lock); return 0; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 01087e035b7d..70ff77f02eee 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -1325,7 +1325,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct iphdr *iph = &tunnel->parms.iph; - struct ipgre_net *ign = net_generic(dev_net(dev), ipgre_net_id); tunnel->dev = dev; strcpy(tunnel->parms.name, dev->name); @@ -1336,7 +1335,6 @@ static void ipgre_fb_tunnel_init(struct net_device *dev) tunnel->hlen = sizeof(struct iphdr) + 4; dev_hold(dev); - rcu_assign_pointer(ign->tunnels_wc[0], tunnel); } @@ -1383,10 +1381,12 @@ static int __net_init ipgre_init_net(struct net *net) if ((err = register_netdev(ign->fb_tunnel_dev))) goto err_reg_dev; + rcu_assign_pointer(ign->tunnels_wc[0], + netdev_priv(ign->fb_tunnel_dev)); return 0; err_reg_dev: - free_netdev(ign->fb_tunnel_dev); + ipgre_dev_free(ign->fb_tunnel_dev); err_alloc_dev: return err; } diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index 3cad2591ace0..3fac340a28d5 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c @@ -927,6 +927,7 @@ static int get_info(struct net *net, void __user *user, private = &tmp; } #endif + memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c index d31b007a6d80..a846d633b3b6 100644 --- a/net/ipv4/netfilter/ip_tables.c +++ b/net/ipv4/netfilter/ip_tables.c @@ -1124,6 +1124,7 @@ static int get_info(struct net *net, void __user *user, private = &tmp; } #endif + memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 295c97431e43..c04787ce1a71 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -47,26 +47,6 @@ __nf_nat_proto_find(u_int8_t protonum) return rcu_dereference(nf_nat_protos[protonum]); } -static const struct nf_nat_protocol * -nf_nat_proto_find_get(u_int8_t protonum) -{ - const struct nf_nat_protocol *p; - - rcu_read_lock(); - p = __nf_nat_proto_find(protonum); - if (!try_module_get(p->me)) - p = &nf_nat_unknown_protocol; - rcu_read_unlock(); - - return p; -} - -static void -nf_nat_proto_put(const struct nf_nat_protocol *p) -{ - module_put(p->me); -} - /* We keep an extra hash for each conntrack, for fast searching. 
*/ static inline unsigned int hash_by_src(const struct net *net, u16 zone, @@ -588,6 +568,26 @@ static struct nf_ct_ext_type nat_extend __read_mostly = { #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_conntrack.h> +static const struct nf_nat_protocol * +nf_nat_proto_find_get(u_int8_t protonum) +{ + const struct nf_nat_protocol *p; + + rcu_read_lock(); + p = __nf_nat_proto_find(protonum); + if (!try_module_get(p->me)) + p = &nf_nat_unknown_protocol; + rcu_read_unlock(); + + return p; +} + +static void +nf_nat_proto_put(const struct nf_nat_protocol *p) +{ + module_put(p->me); +} + static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = { [CTA_PROTONAT_PORT_MIN] = { .type = NLA_U16 }, [CTA_PROTONAT_PORT_MAX] = { .type = NLA_U16 }, diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c index 4ae1f203f7cb..b14ec7d03b6e 100644 --- a/net/ipv4/proc.c +++ b/net/ipv4/proc.c @@ -59,13 +59,13 @@ static int sockstat_seq_show(struct seq_file *seq, void *v) local_bh_enable(); socket_seq_show(seq); - seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n", + seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n", sock_prot_inuse_get(net, &tcp_prot), orphans, tcp_death_row.tw_count, sockets, - atomic_read(&tcp_memory_allocated)); - seq_printf(seq, "UDP: inuse %d mem %d\n", + atomic_long_read(&tcp_memory_allocated)); + seq_printf(seq, "UDP: inuse %d mem %ld\n", sock_prot_inuse_get(net, &udp_prot), - atomic_read(&udp_memory_allocated)); + atomic_long_read(&udp_memory_allocated)); seq_printf(seq, "UDPLITE: inuse %d\n", sock_prot_inuse_get(net, &udplite_prot)); seq_printf(seq, "RAW: inuse %d\n", @@ -253,6 +253,7 @@ static const struct snmp_mib snmp4_net_list[] = { SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP), SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER), + SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW), SNMP_MIB_SENTINEL }; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index d96c1da4b17c..1b4ec21497a4 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -26,6 +26,8 @@ static int zero; static int tcp_retr1_max = 255; static int ip_local_port_range_min[] = { 1, 1 }; static int ip_local_port_range_max[] = { 65535, 65535 }; +static int tcp_adv_win_scale_min = -31; +static int tcp_adv_win_scale_max = 31; /* Update system visible IP port range */ static void set_local_port_range(int range[2]) @@ -398,7 +400,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_mem, .maxlen = sizeof(sysctl_tcp_mem), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_doulongvec_minmax }, { .procname = "tcp_wmem", @@ -426,7 +428,9 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_adv_win_scale, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec_minmax, + .extra1 = &tcp_adv_win_scale_min, + .extra2 = &tcp_adv_win_scale_max, }, { .procname = "tcp_tw_reuse", @@ -602,8 +606,7 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_udp_mem, .maxlen = sizeof(sysctl_udp_mem), .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &zero + .proc_handler = proc_doulongvec_minmax, }, { .procname = "udp_rmem_min", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1664a0590bb8..f15c36a706ec 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -282,7 +282,7 @@ int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT; struct 
percpu_counter tcp_orphan_count; EXPORT_SYMBOL_GPL(tcp_orphan_count); -int sysctl_tcp_mem[3] __read_mostly; +long sysctl_tcp_mem[3] __read_mostly; int sysctl_tcp_wmem[3] __read_mostly; int sysctl_tcp_rmem[3] __read_mostly; @@ -290,7 +290,7 @@ EXPORT_SYMBOL(sysctl_tcp_mem); EXPORT_SYMBOL(sysctl_tcp_rmem); EXPORT_SYMBOL(sysctl_tcp_wmem); -atomic_t tcp_memory_allocated; /* Current allocated memory. */ +atomic_long_t tcp_memory_allocated; /* Current allocated memory. */ EXPORT_SYMBOL(tcp_memory_allocated); /* @@ -2246,7 +2246,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, /* Values greater than interface MTU won't take effect. However * at the point when this call is done we typically don't yet * know which interface is going to be used */ - if (val < 8 || val > MAX_TCP_WINDOW) { + if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) { err = -EINVAL; break; } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 3357f69e353d..6d8ab1c4efc3 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -259,8 +259,11 @@ static void tcp_fixup_sndbuf(struct sock *sk) int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff); - if (sk->sk_sndbuf < 3 * sndmem) - sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]); + if (sk->sk_sndbuf < 3 * sndmem) { + sk->sk_sndbuf = 3 * sndmem; + if (sk->sk_sndbuf > sysctl_tcp_wmem[2]) + sk->sk_sndbuf = sysctl_tcp_wmem[2]; + } } /* 2. Tuning advertised window (window_clamp, rcv_ssthresh) @@ -396,7 +399,7 @@ static void tcp_clamp_window(struct sock *sk) if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && !tcp_memory_pressure && - atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { + atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) { sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), sysctl_tcp_rmem[2]); } @@ -4861,7 +4864,7 @@ static int tcp_should_expand_sndbuf(struct sock *sk) return 0; /* If we are under soft global TCP memory pressure, do not expand. */ - if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) + if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0]) return 0; /* If we filled the congestion window, do not expand. */ diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 8f8527d41682..e13da6de1fc7 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -415,6 +415,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) !icsk->icsk_backoff) break; + if (sock_owned_by_user(sk)) + break; + icsk->icsk_backoff--; inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) << icsk->icsk_backoff; @@ -429,11 +432,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) if (remaining) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, TCP_RTO_MAX); - } else if (sock_owned_by_user(sk)) { - /* RTO revert clocked out retransmission, - * but socket is locked. Will defer. */ - inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, - HZ/20, TCP_RTO_MAX); } else { /* RTO revert clocked out retransmission. * Will retransmit now */ @@ -2045,7 +2043,9 @@ get_req: } get_sk: sk_nulls_for_each_from(sk, node) { - if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) { + if (!net_eq(sock_net(sk), net)) + continue; + if (sk->sk_family == st->family) { cur = sk; goto out; } diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 43cf901d7659..a66735f75963 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -347,7 +347,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) * socket up. 
We've got bigger problems than * non-graceful socket closings. */ - LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n"); + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW); } tcp_update_metrics(sk); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e96152207164..8750d4050b48 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -231,11 +231,10 @@ void tcp_select_initial_window(int __space, __u32 mss, /* when initializing use the value from init_rcv_wnd * rather than the default from above */ - if (init_rcv_wnd && - (*rcv_wnd > init_rcv_wnd * mss)) - *rcv_wnd = init_rcv_wnd * mss; - else if (*rcv_wnd > init_cwnd * mss) - *rcv_wnd = init_cwnd * mss; + if (init_rcv_wnd) + *rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss); + else + *rcv_wnd = min(*rcv_wnd, init_cwnd * mss); } /* Set the clamp no higher than max representable value */ @@ -386,27 +385,30 @@ struct tcp_out_options { */ static u8 tcp_cookie_size_check(u8 desired) { - if (desired > 0) { + int cookie_size; + + if (desired > 0) /* previously specified */ return desired; - } - if (sysctl_tcp_cookie_size <= 0) { + + cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size); + if (cookie_size <= 0) /* no default specified */ return 0; - } - if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) { + + if (cookie_size <= TCP_COOKIE_MIN) /* value too small, specify minimum */ return TCP_COOKIE_MIN; - } - if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) { + + if (cookie_size >= TCP_COOKIE_MAX) /* value too large, specify maximum */ return TCP_COOKIE_MAX; - } - if (0x1 & sysctl_tcp_cookie_size) { + + if (cookie_size & 1) /* 8-bit multiple, illegal, fix it */ - return (u8)(sysctl_tcp_cookie_size + 0x1); - } - return (u8)sysctl_tcp_cookie_size; + cookie_size++; + + return (u8)cookie_size; } /* Write previously computed TCP options to the packet. @@ -1513,6 +1515,7 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) struct tcp_sock *tp = tcp_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); u32 send_win, cong_win, limit, in_flight; + int win_divisor; if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN) goto send_now; @@ -1544,13 +1547,14 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) goto send_now; - if (sysctl_tcp_tso_win_divisor) { + win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor); + if (win_divisor) { u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); /* If at least some fraction of a window is available, * just use it. 
*/ - chunk /= sysctl_tcp_tso_win_divisor; + chunk /= win_divisor; if (limit >= chunk) goto send_now; } else { diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 28cb2d733a3c..5e0a3a582a59 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -110,7 +110,7 @@ struct udp_table udp_table __read_mostly; EXPORT_SYMBOL(udp_table); -int sysctl_udp_mem[3] __read_mostly; +long sysctl_udp_mem[3] __read_mostly; EXPORT_SYMBOL(sysctl_udp_mem); int sysctl_udp_rmem_min __read_mostly; @@ -119,7 +119,7 @@ EXPORT_SYMBOL(sysctl_udp_rmem_min); int sysctl_udp_wmem_min __read_mostly; EXPORT_SYMBOL(sysctl_udp_wmem_min); -atomic_t udp_memory_allocated; +atomic_long_t udp_memory_allocated; EXPORT_SYMBOL(udp_memory_allocated); #define MAX_UDP_PORTS 65536 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e048ec62d109..93b7a933a775 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -98,7 +98,11 @@ #endif #define INFINITY_LIFE_TIME 0xFFFFFFFF -#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b))) + +static inline u32 cstamp_delta(unsigned long cstamp) +{ + return (cstamp - INITIAL_JIFFIES) * 100UL / HZ; +} #define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1) #define ADDRCONF_TIMER_FUZZ (HZ / 4) @@ -2740,10 +2744,6 @@ static int addrconf_ifdown(struct net_device *dev, int how) /* Flag it for later restoration when link comes up */ ifa->flags |= IFA_F_TENTATIVE; ifa->state = INET6_IFADDR_STATE_DAD; - - write_unlock_bh(&idev->lock); - - in6_ifa_hold(ifa); } else { list_del(&ifa->if_list); @@ -2758,19 +2758,15 @@ static int addrconf_ifdown(struct net_device *dev, int how) ifa->state = INET6_IFADDR_STATE_DEAD; spin_unlock_bh(&ifa->state_lock); - if (state == INET6_IFADDR_STATE_DEAD) - goto put_ifa; - } - - __ipv6_ifa_notify(RTM_DELADDR, ifa); - if (ifa->state == INET6_IFADDR_STATE_DEAD) - atomic_notifier_call_chain(&inet6addr_chain, - NETDEV_DOWN, ifa); - -put_ifa: - in6_ifa_put(ifa); + if (state != INET6_IFADDR_STATE_DEAD) { + __ipv6_ifa_notify(RTM_DELADDR, ifa); + atomic_notifier_call_chain(&inet6addr_chain, + NETDEV_DOWN, ifa); + } - write_lock_bh(&idev->lock); + in6_ifa_put(ifa); + write_lock_bh(&idev->lock); + } } list_splice(&keep_list, &idev->addr_list); @@ -3452,10 +3448,8 @@ static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp, { struct ifa_cacheinfo ci; - ci.cstamp = (u32)(TIME_DELTA(cstamp, INITIAL_JIFFIES) / HZ * 100 - + TIME_DELTA(cstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); - ci.tstamp = (u32)(TIME_DELTA(tstamp, INITIAL_JIFFIES) / HZ * 100 - + TIME_DELTA(tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); + ci.cstamp = cstamp_delta(cstamp); + ci.tstamp = cstamp_delta(tstamp); ci.ifa_prefered = preferred; ci.ifa_valid = valid; @@ -3806,8 +3800,10 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_AUTOCONF] = cnf->autoconf; array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits; array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits; - array[DEVCONF_RTR_SOLICIT_INTERVAL] = cnf->rtr_solicit_interval; - array[DEVCONF_RTR_SOLICIT_DELAY] = cnf->rtr_solicit_delay; + array[DEVCONF_RTR_SOLICIT_INTERVAL] = + jiffies_to_msecs(cnf->rtr_solicit_interval); + array[DEVCONF_RTR_SOLICIT_DELAY] = + jiffies_to_msecs(cnf->rtr_solicit_delay); array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version; #ifdef CONFIG_IPV6_PRIVACY array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr; @@ -3821,7 +3817,8 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf, array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo; #ifdef CONFIG_IPV6_ROUTER_PREF 
array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref; - array[DEVCONF_RTR_PROBE_INTERVAL] = cnf->rtr_probe_interval; + array[DEVCONF_RTR_PROBE_INTERVAL] = + jiffies_to_msecs(cnf->rtr_probe_interval); #ifdef CONFIG_IPV6_ROUTE_INFO array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen; #endif @@ -3937,10 +3934,9 @@ static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev, NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags); ci.max_reasm_len = IPV6_MAXPLEN; - ci.tstamp = (__u32)(TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) / HZ * 100 - + TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ); - ci.reachable_time = idev->nd_parms->reachable_time; - ci.retrans_time = idev->nd_parms->retrans_time; + ci.tstamp = cstamp_delta(idev->tstamp); + ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time); + ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time); NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci); nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32)); @@ -4025,11 +4021,11 @@ void inet6_ifinfo_notify(int event, struct inet6_dev *idev) kfree_skb(skb); goto errout; } - rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC); + rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC); return; errout: if (err < 0) - rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err); + rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err); } static inline size_t inet6_prefix_nlmsg_size(void) diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 2a59610c2a58..70e891a20fb9 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1175,6 +1175,8 @@ static void ip6_tnl_link_config(struct ip6_tnl *t) sizeof (struct ipv6hdr); dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr); + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) + dev->mtu-=8; if (dev->mtu < IPV6_MIN_MTU) dev->mtu = IPV6_MIN_MTU; @@ -1363,12 +1365,17 @@ static const struct net_device_ops ip6_tnl_netdev_ops = { static void ip6_tnl_dev_setup(struct net_device *dev) { + struct ip6_tnl *t; + dev->netdev_ops = &ip6_tnl_netdev_ops; dev->destructor = ip6_dev_free; dev->type = ARPHRD_TUNNEL6; dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr); dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr); + t = netdev_priv(dev); + if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT)) + dev->mtu-=8; dev->flags |= IFF_NOARP; dev->addr_len = sizeof(struct in6_addr); dev->features |= NETIF_F_NETNS_LOCAL; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 51df035897e7..455582384ece 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -1137,6 +1137,7 @@ static int get_info(struct net *net, void __user *user, private = &tmp; } #endif + memset(&info, 0, sizeof(info)); info.valid_hooks = t->valid_hooks; memcpy(info.hook_entry, private->hook_entry, sizeof(info.hook_entry)); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 3a3f129a44cb..79d43aa8fa8d 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -286,7 +286,7 @@ found: /* Check for overlap with preceding fragment. */ if (prev && - (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0) + (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset) goto discard_fq; /* Look for overlap with succeeding segment. 
*/ diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c index d082eaeefa25..24b3558b8e67 100644 --- a/net/ipv6/proc.c +++ b/net/ipv6/proc.c @@ -126,6 +126,8 @@ static const struct snmp_mib snmp6_udp6_list[] = { SNMP_MIB_ITEM("Udp6NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("Udp6InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("Udp6OutDatagrams", UDP_MIB_OUTDATAGRAMS), + SNMP_MIB_ITEM("Udp6RcvbufErrors", UDP_MIB_RCVBUFERRORS), + SNMP_MIB_ITEM("Udp6SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_SENTINEL }; @@ -134,6 +136,8 @@ static const struct snmp_mib snmp6_udplite6_list[] = { SNMP_MIB_ITEM("UdpLite6NoPorts", UDP_MIB_NOPORTS), SNMP_MIB_ITEM("UdpLite6InErrors", UDP_MIB_INERRORS), SNMP_MIB_ITEM("UdpLite6OutDatagrams", UDP_MIB_OUTDATAGRAMS), + SNMP_MIB_ITEM("UdpLite6RcvbufErrors", UDP_MIB_RCVBUFERRORS), + SNMP_MIB_ITEM("UdpLite6SndbufErrors", UDP_MIB_SNDBUFERRORS), SNMP_MIB_SENTINEL }; diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index c7ba3149633f..0f2766453759 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -349,7 +349,7 @@ found: /* Check for overlap with preceding fragment. */ if (prev && - (FRAG6_CB(prev)->offset + prev->len) - offset > 0) + (FRAG6_CB(prev)->offset + prev->len) > offset) goto discard_fq; /* Look for overlap with succeeding segment. */ diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 25661f968f3f..96455ffb76fb 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1945,8 +1945,12 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops); struct neighbour *neigh; - if (rt == NULL) + if (rt == NULL) { + if (net_ratelimit()) + pr_warning("IPv6: Maximum number of routes reached," + " consider increasing route/max_size.\n"); return ERR_PTR(-ENOMEM); + } dev_hold(net->loopback_dev); in6_dev_hold(idev); @@ -2741,6 +2745,7 @@ static void __net_exit ip6_route_net_exit(struct net *net) kfree(net->ipv6.ip6_prohibit_entry); kfree(net->ipv6.ip6_blk_hole_entry); #endif + dst_entries_destroy(&net->ipv6.ip6_dst_ops); } static struct pernet_operations ip6_route_net_ops = { @@ -2832,5 +2837,6 @@ void ip6_route_cleanup(void) xfrm6_fini(); fib6_gc_cleanup(); unregister_pernet_subsys(&ip6_route_net_ops); + dst_entries_destroy(&ip6_dst_blackhole_ops); kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep); } diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index d6bfaec3bbbf..8c4d00c7cd2b 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c @@ -606,8 +606,9 @@ static int ipip6_rcv(struct sk_buff *skb) return 0; } - icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); + /* no tunnel matched, let upstream know, ipsec may handle it */ rcu_read_unlock(); + return 1; out: kfree_skb(skb); return 0; diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 7f097989cde2..a6de3059746d 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -45,7 +45,6 @@ #include <linux/capability.h> #include <linux/module.h> #include <linux/types.h> -#include <linux/smp_lock.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/slab.h> diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 7fa86373de41..7c567b8aa89a 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -15,7 +15,6 @@ #include <linux/sched.h> #include <linux/slab.h> -#include <linux/smp_lock.h> #include "irnet_ppp.h" /* Private header */ /* Please put other headers in irnet.h - Thanks */ diff --git a/net/irda/irttp.c b/net/irda/irttp.c index 285761e77d90..f6054f9ccbe3 100644 --- a/net/irda/irttp.c +++ 
b/net/irda/irttp.c @@ -550,22 +550,30 @@ EXPORT_SYMBOL(irttp_close_tsap); */ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) { + int ret; + IRDA_ASSERT(self != NULL, return -1;); IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;); IRDA_ASSERT(skb != NULL, return -1;); IRDA_DEBUG(4, "%s()\n", __func__); + /* Take shortcut on zero byte packets */ + if (skb->len == 0) { + ret = 0; + goto err; + } + /* Check that nothing bad happens */ - if ((skb->len == 0) || (!self->connected)) { - IRDA_DEBUG(1, "%s(), No data, or not connected\n", - __func__); + if (!self->connected) { + IRDA_WARNING("%s(), Not connected\n", __func__); + ret = -ENOTCONN; goto err; } if (skb->len > self->max_seg_size) { - IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n", - __func__); + IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__); + ret = -EMSGSIZE; goto err; } @@ -576,7 +584,7 @@ int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb) err: dev_kfree_skb(skb); - return -1; + return ret; } EXPORT_SYMBOL(irttp_udata_request); @@ -599,9 +607,15 @@ int irttp_data_request(struct tsap_cb *self, struct sk_buff *skb) IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__, skb_queue_len(&self->tx_queue)); + /* Take shortcut on zero byte packets */ + if (skb->len == 0) { + ret = 0; + goto err; + } + /* Check that nothing bad happens */ - if ((skb->len == 0) || (!self->connected)) { - IRDA_WARNING("%s: No data, or not connected\n", __func__); + if (!self->connected) { + IRDA_WARNING("%s: Not connected\n", __func__); ret = -ENOTCONN; goto err; } diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 104ec3b283d4..b8dbae82fab8 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -249,7 +249,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file) struct seq_file *seq; int rc = -ENOMEM; - pd = kzalloc(GFP_KERNEL, sizeof(*pd)); + pd = kzalloc(sizeof(*pd), GFP_KERNEL); if (pd == NULL) goto out; diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 0bf6a59545ab..522e219f3558 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -674,4 +674,8 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("James Chapman <jchapman@katalix.com>"); MODULE_DESCRIPTION("L2TP over IP"); MODULE_VERSION("1.0"); -MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP); + +/* Use the value of SOCK_DGRAM (2) directory, because __stringify does't like + * enums + */ +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP); diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 582612998211..e35dbe55f520 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -317,8 +317,9 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) goto out; rc = -ENODEV; rtnl_lock(); + rcu_read_lock(); if (sk->sk_bound_dev_if) { - llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if); + llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if); if (llc->dev) { if (!addr->sllc_arphrd) addr->sllc_arphrd = llc->dev->type; @@ -329,13 +330,13 @@ static int llc_ui_bind(struct socket *sock, struct sockaddr *uaddr, int addrlen) !llc_mac_match(addr->sllc_mac, llc->dev->dev_addr)) { rc = -EINVAL; - dev_put(llc->dev); llc->dev = NULL; } } } else llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd, addr->sllc_mac); + rcu_read_unlock(); rtnl_unlock(); if (!llc->dev) goto out; diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig index 4d6f8653ec88..8e8ea9cb7093 100644 --- a/net/mac80211/Kconfig +++ b/net/mac80211/Kconfig @@ -92,7 +92,7 @@ config 
MAC80211_MESH config MAC80211_LEDS bool "Enable LED triggers" depends on MAC80211 - select NEW_LEDS + depends on LEDS_CLASS select LEDS_TRIGGERS ---help--- This option enables a few LED triggers for different diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index f9163b12c7f1..7aa85591dbe7 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -391,6 +391,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, u32 hw_reconf_flags = 0; int i; + if (local->scan_sdata == sdata) + ieee80211_scan_cancel(local); + clear_bit(SDATA_STATE_RUNNING, &sdata->state); /* @@ -523,9 +526,6 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); - if (local->scan_sdata == sdata) - ieee80211_scan_cancel(local); - /* * Disable beaconing here for mesh only, AP and IBSS * are already taken care of. diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 902b03ee8f60..54fb4a0e76f0 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2247,6 +2247,10 @@ ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx) break; case cpu_to_le16(IEEE80211_STYPE_DEAUTH): case cpu_to_le16(IEEE80211_STYPE_DISASSOC): + if (is_multicast_ether_addr(mgmt->da) && + !is_broadcast_ether_addr(mgmt->da)) + return RX_DROP_MONITOR; + /* process only for station */ if (sdata->vif.type != NL80211_IFTYPE_STATION) return RX_DROP_MONITOR; @@ -2741,6 +2745,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, if (ieee80211_prepare_and_rx_handle(&rx, skb, true)) return; + goto out; } } @@ -2780,6 +2785,7 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw, return; } + out: dev_kfree_skb(skb); } diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 96c594309506..7a637b80a62e 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -1587,7 +1587,12 @@ static void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, list) { if (!ieee80211_sdata_running(tmp_sdata)) continue; - if (tmp_sdata->vif.type != NL80211_IFTYPE_AP) + if (tmp_sdata->vif.type == + NL80211_IFTYPE_MONITOR || + tmp_sdata->vif.type == + NL80211_IFTYPE_AP_VLAN || + tmp_sdata->vif.type == + NL80211_IFTYPE_WDS) continue; if (compare_ether_addr(tmp_sdata->vif.addr, hdr->addr2) == 0) { @@ -1732,15 +1737,13 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, int nh_pos, h_pos; struct sta_info *sta = NULL; u32 sta_flags = 0; + struct sk_buff *tmp_skb; if (unlikely(skb->len < ETH_HLEN)) { ret = NETDEV_TX_OK; goto fail; } - nh_pos = skb_network_header(skb) - skb->data; - h_pos = skb_transport_header(skb) - skb->data; - /* convert Ethernet header to proper 802.11 header (based on * operation mode) */ ethertype = (skb->data[12] << 8) | skb->data[13]; @@ -1913,6 +1916,20 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, goto fail; } + /* + * If the skb is shared we need to obtain our own copy. 
+ */ + if (skb_shared(skb)) { + tmp_skb = skb; + skb = skb_copy(skb, GFP_ATOMIC); + kfree_skb(tmp_skb); + + if (!skb) { + ret = NETDEV_TX_OK; + goto fail; + } + } + hdr.frame_control = fc; hdr.duration_id = 0; hdr.seq_ctrl = 0; @@ -1931,6 +1948,9 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb, encaps_len = 0; } + nh_pos = skb_network_header(skb) - skb->data; + h_pos = skb_transport_header(skb) - skb->data; + skb_pull(skb, skip_header_bytes); nh_pos -= skip_header_bytes; h_pos -= skip_header_bytes; diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig index a22dac227055..70bd1d0774c6 100644 --- a/net/netfilter/ipvs/Kconfig +++ b/net/netfilter/ipvs/Kconfig @@ -4,6 +4,7 @@ menuconfig IP_VS tristate "IP virtual server support" depends on NET && INET && NETFILTER + depends on (NF_CONNTRACK || NF_CONNTRACK=n) ---help--- IP Virtual Server support will let you build a high-performance virtual server based on cluster of two or more real servers. This diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index 1eacf8d9966a..27a5ea6b6a0f 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -1312,7 +1312,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int *vmalloced, int nulls) if (!hash) { *vmalloced = 1; printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n"); - hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); + hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, + PAGE_KERNEL); } if (hash && nulls) diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index ed6d92958023..dc7bb74110df 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -292,6 +292,12 @@ int nf_conntrack_l4proto_register(struct nf_conntrack_l4proto *l4proto) for (i = 0; i < MAX_NF_CT_PROTO; i++) proto_array[i] = &nf_conntrack_l4proto_generic; + + /* Before making proto_array visible to lockless readers, + * we must make sure its content is committed to memory. 
+ */ + smp_wmb(); + nf_ct_protos[l4proto->l3proto] = proto_array; } else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] != &nf_conntrack_l4proto_generic) { diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 3616f27b9d46..8298e676f5a0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1610,9 +1610,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock, err = -EINVAL; vnet_hdr_len = sizeof(vnet_hdr); - if ((len -= vnet_hdr_len) < 0) + if (len < vnet_hdr_len) goto out_free; + len -= vnet_hdr_len; + if (skb_is_gso(skb)) { struct skb_shared_info *sinfo = skb_shinfo(skb); @@ -1719,7 +1721,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) - strlcpy(uaddr->sa_data, dev->name, 15); + strncpy(uaddr->sa_data, dev->name, 14); else memset(uaddr->sa_data, 0, 14); rcu_read_unlock(); @@ -1742,6 +1744,7 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr, sll->sll_family = AF_PACKET; sll->sll_ifindex = po->ifindex; sll->sll_protocol = po->num; + sll->sll_pkttype = 0; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex); if (dev) { diff --git a/net/rds/loop.c b/net/rds/loop.c index c390156b426f..aeec1d483b17 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c @@ -134,8 +134,12 @@ static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp) static void rds_loop_conn_free(void *arg) { struct rds_loop_connection *lc = arg; + unsigned long flags; + rdsdebug("lc %p\n", lc); + spin_lock_irqsave(&loop_conns_lock, flags); list_del(&lc->loop_node); + spin_unlock_irqrestore(&loop_conns_lock, flags); kfree(lc); } diff --git a/net/rds/message.c b/net/rds/message.c index a84545dae370..1fd3d29023d7 100644 --- a/net/rds/message.c +++ b/net/rds/message.c @@ -224,6 +224,9 @@ struct scatterlist *rds_message_alloc_sgs(struct rds_message *rm, int nents) WARN_ON(rm->m_used_sgs + nents > rm->m_total_sgs); WARN_ON(!nents); + if (rm->m_used_sgs + nents > rm->m_total_sgs) + return NULL; + sg_ret = &sg_first[rm->m_used_sgs]; sg_init_table(sg_ret, nents); rm->m_used_sgs += nents; @@ -246,6 +249,10 @@ struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned in rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len); rm->data.op_nents = ceil(total_len, PAGE_SIZE); rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs); + if (!rm->data.op_sg) { + rds_message_put(rm); + return ERR_PTR(-ENOMEM); + } for (i = 0; i < rm->data.op_nents; ++i) { sg_set_page(&rm->data.op_sg[i], diff --git a/net/rds/rdma.c b/net/rds/rdma.c index 1a41debca1ce..4e37c1cbe8b2 100644 --- a/net/rds/rdma.c +++ b/net/rds/rdma.c @@ -479,13 +479,38 @@ void rds_atomic_free_op(struct rm_atomic_op *ao) /* - * Count the number of pages needed to describe an incoming iovec. + * Count the number of pages needed to describe an incoming iovec array. */ -static int rds_rdma_pages(struct rds_rdma_args *args) +static int rds_rdma_pages(struct rds_iovec iov[], int nr_iovecs) +{ + int tot_pages = 0; + unsigned int nr_pages; + unsigned int i; + + /* figure out the number of pages in the vector */ + for (i = 0; i < nr_iovecs; i++) { + nr_pages = rds_pages_in_vec(&iov[i]); + if (nr_pages == 0) + return -EINVAL; + + tot_pages += nr_pages; + + /* + * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, + * so tot_pages cannot overflow without first going negative. 
+ */ + if (tot_pages < 0) + return -EINVAL; + } + + return tot_pages; +} + +int rds_rdma_extra_size(struct rds_rdma_args *args) { struct rds_iovec vec; struct rds_iovec __user *local_vec; - unsigned int tot_pages = 0; + int tot_pages = 0; unsigned int nr_pages; unsigned int i; @@ -502,14 +527,16 @@ static int rds_rdma_pages(struct rds_rdma_args *args) return -EINVAL; tot_pages += nr_pages; - } - return tot_pages; -} + /* + * nr_pages for one entry is limited to (UINT_MAX>>PAGE_SHIFT)+1, + * so tot_pages cannot overflow without first going negative. + */ + if (tot_pages < 0) + return -EINVAL; + } -int rds_rdma_extra_size(struct rds_rdma_args *args) -{ - return rds_rdma_pages(args) * sizeof(struct scatterlist); + return tot_pages * sizeof(struct scatterlist); } /* @@ -520,13 +547,12 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, struct cmsghdr *cmsg) { struct rds_rdma_args *args; - struct rds_iovec vec; struct rm_rdma_op *op = &rm->rdma; int nr_pages; unsigned int nr_bytes; struct page **pages = NULL; - struct rds_iovec __user *local_vec; - unsigned int nr; + struct rds_iovec iovstack[UIO_FASTIOV], *iovs = iovstack; + int iov_size; unsigned int i, j; int ret = 0; @@ -541,14 +567,31 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, goto out; } - if (args->nr_local > (u64)UINT_MAX) { + if (args->nr_local > UIO_MAXIOV) { ret = -EMSGSIZE; goto out; } - nr_pages = rds_rdma_pages(args); - if (nr_pages < 0) + /* Check whether to allocate the iovec area */ + iov_size = args->nr_local * sizeof(struct rds_iovec); + if (args->nr_local > UIO_FASTIOV) { + iovs = sock_kmalloc(rds_rs_to_sk(rs), iov_size, GFP_KERNEL); + if (!iovs) { + ret = -ENOMEM; + goto out; + } + } + + if (copy_from_user(iovs, (struct rds_iovec __user *)(unsigned long) args->local_vec_addr, iov_size)) { + ret = -EFAULT; + goto out; + } + + nr_pages = rds_rdma_pages(iovs, args->nr_local); + if (nr_pages < 0) { + ret = -EINVAL; goto out; + } pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) { @@ -564,6 +607,10 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, op->op_recverr = rs->rs_recverr; WARN_ON(!nr_pages); op->op_sg = rds_message_alloc_sgs(rm, nr_pages); + if (!op->op_sg) { + ret = -ENOMEM; + goto out; + } if (op->op_notify || op->op_recverr) { /* We allocate an uninitialized notifier here, because @@ -597,50 +644,40 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, (unsigned long long)args->remote_vec.addr, op->op_rkey); - local_vec = (struct rds_iovec __user *)(unsigned long) args->local_vec_addr; - for (i = 0; i < args->nr_local; i++) { - if (copy_from_user(&vec, &local_vec[i], - sizeof(struct rds_iovec))) { - ret = -EFAULT; - goto out; - } - - nr = rds_pages_in_vec(&vec); - if (nr == 0) { - ret = -EINVAL; - goto out; - } + struct rds_iovec *iov = &iovs[i]; + /* don't need to check, rds_rdma_pages() verified nr will be +nonzero */ + unsigned int nr = rds_pages_in_vec(iov); - rs->rs_user_addr = vec.addr; - rs->rs_user_bytes = vec.bytes; + rs->rs_user_addr = iov->addr; + rs->rs_user_bytes = iov->bytes; /* If it's a WRITE operation, we want to pin the pages for reading. * If it's a READ operation, we need to pin the pages for writing. 
*/ - ret = rds_pin_pages(vec.addr, nr, pages, !op->op_write); + ret = rds_pin_pages(iov->addr, nr, pages, !op->op_write); if (ret < 0) goto out; - rdsdebug("RDS: nr_bytes %u nr %u vec.bytes %llu vec.addr %llx\n", - nr_bytes, nr, vec.bytes, vec.addr); + rdsdebug("RDS: nr_bytes %u nr %u iov->bytes %llu iov->addr %llx\n", + nr_bytes, nr, iov->bytes, iov->addr); - nr_bytes += vec.bytes; + nr_bytes += iov->bytes; for (j = 0; j < nr; j++) { - unsigned int offset = vec.addr & ~PAGE_MASK; + unsigned int offset = iov->addr & ~PAGE_MASK; struct scatterlist *sg; sg = &op->op_sg[op->op_nents + j]; sg_set_page(sg, pages[j], - min_t(unsigned int, vec.bytes, PAGE_SIZE - offset), + min_t(unsigned int, iov->bytes, PAGE_SIZE - offset), offset); - rdsdebug("RDS: sg->offset %x sg->len %x vec.addr %llx vec.bytes %llu\n", - sg->offset, sg->length, vec.addr, vec.bytes); + rdsdebug("RDS: sg->offset %x sg->len %x iov->addr %llx iov->bytes %llu\n", + sg->offset, sg->length, iov->addr, iov->bytes); - vec.addr += sg->length; - vec.bytes -= sg->length; + iov->addr += sg->length; + iov->bytes -= sg->length; } op->op_nents += nr; @@ -655,13 +692,14 @@ int rds_cmsg_rdma_args(struct rds_sock *rs, struct rds_message *rm, } op->op_bytes = nr_bytes; - ret = 0; out: + if (iovs != iovstack) + sock_kfree_s(rds_rs_to_sk(rs), iovs, iov_size); kfree(pages); if (ret) rds_rdma_free_op(op); - - rds_stats_inc(s_send_rdma); + else + rds_stats_inc(s_send_rdma); return ret; } @@ -773,6 +811,10 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm, rm->atomic.op_active = 1; rm->atomic.op_recverr = rs->rs_recverr; rm->atomic.op_sg = rds_message_alloc_sgs(rm, 1); + if (!rm->atomic.op_sg) { + ret = -ENOMEM; + goto err; + } /* verify 8 byte-aligned */ if (args->local_addr & 0x7) { diff --git a/net/rds/send.c b/net/rds/send.c index 0bc9db17a87d..35b9c2e9caf1 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -973,6 +973,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, /* Attach data to the rm */ if (payload_len) { rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE)); + if (!rm->data.op_sg) { + ret = -ENOMEM; + goto out; + } ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len); if (ret) goto out; diff --git a/net/rds/tcp.c b/net/rds/tcp.c index 08a8c6cf2d10..8e0a32001c90 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -221,7 +221,13 @@ static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp) static void rds_tcp_conn_free(void *arg) { struct rds_tcp_connection *tc = arg; + unsigned long flags; rdsdebug("freeing tc %p\n", tc); + + spin_lock_irqsave(&rds_tcp_conn_lock, flags); + list_del(&tc->t_tcp_node); + spin_unlock_irqrestore(&rds_tcp_conn_lock, flags); + kmem_cache_free(rds_tcp_conn_slab, tc); } diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c index efd4f95fd050..f23d9155b1ef 100644 --- a/net/sched/cls_basic.c +++ b/net/sched/cls_basic.c @@ -268,6 +268,10 @@ static int basic_dump(struct tcf_proto *tp, unsigned long fh, goto nla_put_failure; nla_nest_end(skb, nest); + + if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0) + goto nla_put_failure; + return skb->len; nla_put_failure: diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c index 37dff78e9cb1..d49c40fb7e09 100644 --- a/net/sched/cls_cgroup.c +++ b/net/sched/cls_cgroup.c @@ -34,8 +34,6 @@ struct cgroup_subsys net_cls_subsys = { .populate = cgrp_populate, #ifdef CONFIG_NET_CLS_CGROUP .subsys_id = net_cls_subsys_id, -#else -#define net_cls_subsys_id 
net_cls_subsys.subsys_id #endif .module = THIS_MODULE, }; diff --git a/net/sched/em_text.c b/net/sched/em_text.c index 763253257411..ea8f566e720c 100644 --- a/net/sched/em_text.c +++ b/net/sched/em_text.c @@ -103,7 +103,8 @@ retry: static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m) { - textsearch_destroy(EM_TEXT_PRIV(m)->config); + if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config) + textsearch_destroy(EM_TEXT_PRIV(m)->config); } static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m) diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 1ef29c74d85e..e58f9476f29c 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -92,7 +92,7 @@ static struct sctp_af *sctp_af_v6_specific; struct kmem_cache *sctp_chunk_cachep __read_mostly; struct kmem_cache *sctp_bucket_cachep __read_mostly; -int sysctl_sctp_mem[3]; +long sysctl_sctp_mem[3]; int sysctl_sctp_rmem[3]; int sysctl_sctp_wmem[3]; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index e34ca9cc1167..0b9ee34ad35c 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -111,12 +111,12 @@ static void sctp_sock_migrate(struct sock *, struct sock *, static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; extern struct kmem_cache *sctp_bucket_cachep; -extern int sysctl_sctp_mem[3]; +extern long sysctl_sctp_mem[3]; extern int sysctl_sctp_rmem[3]; extern int sysctl_sctp_wmem[3]; static int sctp_memory_pressure; -static atomic_t sctp_memory_allocated; +static atomic_long_t sctp_memory_allocated; struct percpu_counter sctp_sockets_allocated; static void sctp_enter_memory_pressure(struct sock *sk) @@ -2932,6 +2932,7 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva struct sctp_association *asoc = NULL; struct sctp_setpeerprim prim; struct sctp_chunk *chunk; + struct sctp_af *af; int err; sp = sctp_sk(sk); @@ -2959,6 +2960,13 @@ static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optva if (!sctp_state(asoc, ESTABLISHED)) return -ENOTCONN; + af = sctp_get_af_specific(prim.sspp_addr.ss_family); + if (!af) + return -EINVAL; + + if (!af->addr_valid((union sctp_addr *)&prim.sspp_addr, sp, NULL)) + return -EADDRNOTAVAIL; + if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) return -EADDRNOTAVAIL; diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c index 832590bbe0c0..50cb57f0919e 100644 --- a/net/sctp/sysctl.c +++ b/net/sctp/sysctl.c @@ -54,7 +54,7 @@ static int sack_timer_max = 500; static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */ static int rwnd_scale_max = 16; -extern int sysctl_sctp_mem[3]; +extern long sysctl_sctp_mem[3]; extern int sysctl_sctp_rmem[3]; extern int sysctl_sctp_wmem[3]; @@ -203,7 +203,7 @@ static ctl_table sctp_table[] = { .data = &sysctl_sctp_mem, .maxlen = sizeof(sysctl_sctp_mem), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = proc_doulongvec_minmax }, { .procname = "sctp_rmem", diff --git a/net/socket.c b/net/socket.c index 5247ae10f374..088fb3fd45e0 100644 --- a/net/socket.c +++ b/net/socket.c @@ -732,6 +732,21 @@ static int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, return ret; } +/** + * kernel_recvmsg - Receive a message from a socket (kernel space) + * @sock: The socket to receive the message from + * @msg: Received message + * @vec: Input s/g array for message data + * @num: Size of input s/g array + * @size: Number of bytes to read + * @flags: Message flags (MSG_DONTWAIT, etc...) 
+ * + * On return the msg structure contains the scatter/gather array passed in the + * vec argument. The array is modified so that it consists of the unfilled + * portion of the original array. + * + * The returned value is the total number of bytes received, or an error. + */ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t size, int flags) { @@ -1652,6 +1667,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len, struct iovec iov; int fput_needed; + if (len > INT_MAX) + len = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; @@ -1709,6 +1726,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size, int err, err2; int fput_needed; + if (size > INT_MAX) + size = INT_MAX; sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index 9dab9573be41..92ce94f5146b 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -989,20 +989,26 @@ call_refreshresult(struct rpc_task *task) dprint_status(task); task->tk_status = 0; - task->tk_action = call_allocate; - if (status >= 0 && rpcauth_uptodatecred(task)) - return; + task->tk_action = call_refresh; switch (status) { - case -EACCES: - rpc_exit(task, -EACCES); - return; - case -ENOMEM: - rpc_exit(task, -ENOMEM); + case 0: + if (rpcauth_uptodatecred(task)) + task->tk_action = call_allocate; return; case -ETIMEDOUT: rpc_delay(task, 3*HZ); + case -EAGAIN: + status = -EACCES; + if (!task->tk_cred_retry) + break; + task->tk_cred_retry--; + dprintk("RPC: %5u %s: retry refresh creds\n", + task->tk_pid, __func__); + return; } - task->tk_action = call_refresh; + dprintk("RPC: %5u %s: refresh creds failed with error %d\n", + task->tk_pid, __func__, status); + rpc_exit(task, status); } /* diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c index f71a73107ae9..80df89d957ba 100644 --- a/net/sunrpc/stats.c +++ b/net/sunrpc/stats.c @@ -115,9 +115,7 @@ EXPORT_SYMBOL_GPL(svc_seq_show); */ struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { - struct rpc_iostats *new; - new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL); - return new; + return kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL); } EXPORT_SYMBOL_GPL(rpc_alloc_iostats); diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index c82fe739fbdc..3f2c5559ca1a 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c @@ -5,7 +5,6 @@ */ #include <linux/sched.h> -#include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/freezer.h> #include <linux/kthread.h> @@ -213,6 +212,7 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, spin_lock(&svc_xprt_class_lock); list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { struct svc_xprt *newxprt; + unsigned short newport; if (strcmp(xprt_name, xcl->xcl_name)) continue; @@ -231,8 +231,9 @@ int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, spin_lock_bh(&serv->sv_lock); list_add(&newxprt->xpt_list, &serv->sv_permsocks); spin_unlock_bh(&serv->sv_lock); + newport = svc_xprt_local_port(newxprt); clear_bit(XPT_BUSY, &newxprt->xpt_flags); - return svc_xprt_local_port(newxprt); + return newport; } err: spin_unlock(&svc_xprt_class_lock); @@ -426,8 +427,13 @@ void svc_xprt_received(struct svc_xprt *xprt) { BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); xprt->xpt_pool = NULL; + /* As soon as we clear busy, the xprt could be closed and + * 'put', so we need a reference to call svc_xprt_enqueue with: 
+ */ + svc_xprt_get(xprt); clear_bit(XPT_BUSY, &xprt->xpt_flags); svc_xprt_enqueue(xprt); + svc_xprt_put(xprt); } EXPORT_SYMBOL_GPL(svc_xprt_received); diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 33217fc3d697..e9f0d5004483 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -396,6 +396,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr, struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; struct tipc_sock *tsock = tipc_sk(sock->sk); + memset(addr, 0, sizeof(*addr)); if (peer) { if ((sock->state != SS_CONNECTED) && ((peer != 2) || (sock->state != SS_DISCONNECTING))) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 3c95304a0817..2268e6798124 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1343,9 +1343,25 @@ static void unix_destruct_scm(struct sk_buff *skb) sock_wfree(skb); } +#define MAX_RECURSION_LEVEL 4 + static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) { int i; + unsigned char max_level = 0; + int unix_sock_count = 0; + + for (i = scm->fp->count - 1; i >= 0; i--) { + struct sock *sk = unix_get_socket(scm->fp->fp[i]); + + if (sk) { + unix_sock_count++; + max_level = max(max_level, + unix_sk(sk)->recursion_level); + } + } + if (unlikely(max_level > MAX_RECURSION_LEVEL)) + return -ETOOMANYREFS; /* * Need to duplicate file references for the sake of garbage @@ -1356,9 +1372,11 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb) if (!UNIXCB(skb).fp) return -ENOMEM; - for (i = scm->fp->count-1; i >= 0; i--) - unix_inflight(scm->fp->fp[i]); - return 0; + if (unix_sock_count) { + for (i = scm->fp->count - 1; i >= 0; i--) + unix_inflight(scm->fp->fp[i]); + } + return max_level; } static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds) @@ -1393,6 +1411,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, struct sk_buff *skb; long timeo; struct scm_cookie tmp_scm; + int max_level; if (NULL == siocb->scm) siocb->scm = &tmp_scm; @@ -1431,8 +1450,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock, goto out; err = unix_scm_to_skb(siocb->scm, skb, true); - if (err) + if (err < 0) goto out_free; + max_level = err + 1; unix_get_secdata(siocb->scm, skb); skb_reset_transport_header(skb); @@ -1514,6 +1534,8 @@ restart: if (sock_flag(other, SOCK_RCVTSTAMP)) __net_timestamp(skb); skb_queue_tail(&other->sk_receive_queue, skb); + if (max_level > unix_sk(other)->recursion_level) + unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other, len); sock_put(other); @@ -1544,6 +1566,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, int sent = 0; struct scm_cookie tmp_scm; bool fds_sent = false; + int max_level; if (NULL == siocb->scm) siocb->scm = &tmp_scm; @@ -1607,10 +1630,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, /* Only send the fds in the first buffer */ err = unix_scm_to_skb(siocb->scm, skb, !fds_sent); - if (err) { + if (err < 0) { kfree_skb(skb); goto out_err; } + max_level = err + 1; fds_sent = true; err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size); @@ -1626,6 +1650,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock, goto pipe_err_free; skb_queue_tail(&other->sk_receive_queue, skb); + if (max_level > unix_sk(other)->recursion_level) + unix_sk(other)->recursion_level = max_level; unix_state_unlock(other); other->sk_data_ready(other, size); sent += size; @@ -1845,6 +1871,7 @@ 
static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, unix_state_lock(sk); skb = skb_dequeue(&sk->sk_receive_queue); if (skb == NULL) { + unix_sk(sk)->recursion_level = 0; if (copied >= target) goto unlock; diff --git a/net/unix/garbage.c b/net/unix/garbage.c index c8df6fda0b1f..f89f83bf828e 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -96,7 +96,7 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait); unsigned int unix_tot_inflight; -static struct sock *unix_get_socket(struct file *filp) +struct sock *unix_get_socket(struct file *filp) { struct sock *u_sock = NULL; struct inode *inode = filp->f_path.dentry->d_inode; @@ -259,9 +259,16 @@ static void inc_inflight_move_tail(struct unix_sock *u) } static bool gc_in_progress = false; +#define UNIX_INFLIGHT_TRIGGER_GC 16000 void wait_for_unix_gc(void) { + /* + * If number of inflight sockets is insane, + * force a garbage collect right now. + */ + if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress) + unix_gc(); wait_event(unix_gc_wait, gc_in_progress == false); } diff --git a/net/wireless/chan.c b/net/wireless/chan.c index d0c92dddb26b..17cd0c04d139 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -44,6 +44,38 @@ rdev_freq_to_chan(struct cfg80211_registered_device *rdev, return chan; } +static bool can_beacon_sec_chan(struct wiphy *wiphy, + struct ieee80211_channel *chan, + enum nl80211_channel_type channel_type) +{ + struct ieee80211_channel *sec_chan; + int diff; + + switch (channel_type) { + case NL80211_CHAN_HT40PLUS: + diff = 20; + break; + case NL80211_CHAN_HT40MINUS: + diff = -20; + break; + default: + return false; + } + + sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff); + if (!sec_chan) + return false; + + /* we'll need a DFS capability later */ + if (sec_chan->flags & (IEEE80211_CHAN_DISABLED | + IEEE80211_CHAN_PASSIVE_SCAN | + IEEE80211_CHAN_NO_IBSS | + IEEE80211_CHAN_RADAR)) + return false; + + return true; +} + int cfg80211_set_freq(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, int freq, enum nl80211_channel_type channel_type) @@ -68,6 +100,28 @@ int cfg80211_set_freq(struct cfg80211_registered_device *rdev, if (!chan) return -EINVAL; + /* Both channels should be able to initiate communication */ + if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC || + wdev->iftype == NL80211_IFTYPE_AP || + wdev->iftype == NL80211_IFTYPE_AP_VLAN || + wdev->iftype == NL80211_IFTYPE_MESH_POINT || + wdev->iftype == NL80211_IFTYPE_P2P_GO)) { + switch (channel_type) { + case NL80211_CHAN_HT40PLUS: + case NL80211_CHAN_HT40MINUS: + if (!can_beacon_sec_chan(&rdev->wiphy, chan, + channel_type)) { + printk(KERN_DEBUG + "cfg80211: Secondary channel not " + "allowed to initiate communication\n"); + return -EINVAL; + } + break; + default: + break; + } + } + result = rdev->ops->set_channel(&rdev->wiphy, wdev ? 
wdev->netdev : NULL, chan, channel_type); diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index c506241f8637..4e78e3f26798 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -224,8 +224,8 @@ static int nl80211_prepare_netdev_dump(struct sk_buff *skb, } *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); - if (IS_ERR(dev)) { - err = PTR_ERR(dev); + if (IS_ERR(*rdev)) { + err = PTR_ERR(*rdev); goto out_rtnl; } diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index 771bab00754b..55187c8f6420 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c @@ -61,6 +61,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, while (len > 0) { switch (*p & X25_FAC_CLASS_MASK) { case X25_FAC_CLASS_A: + if (len < 2) + return 0; switch (*p) { case X25_FAC_REVERSE: if((p[1] & 0x81) == 0x81) { @@ -104,6 +106,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, len -= 2; break; case X25_FAC_CLASS_B: + if (len < 3) + return 0; switch (*p) { case X25_FAC_PACKET_SIZE: facilities->pacsize_in = p[1]; @@ -125,6 +129,8 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, len -= 3; break; case X25_FAC_CLASS_C: + if (len < 4) + return 0; printk(KERN_DEBUG "X.25: unknown facility %02X, " "values %02X, %02X, %02X\n", p[0], p[1], p[2], p[3]); @@ -132,26 +138,26 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, len -= 4; break; case X25_FAC_CLASS_D: + if (len < p[1] + 2) + return 0; switch (*p) { case X25_FAC_CALLING_AE: - if (p[1] > X25_MAX_DTE_FACIL_LEN) - break; + if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) + return 0; dte_facs->calling_len = p[2]; memcpy(dte_facs->calling_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLING_AE; break; case X25_FAC_CALLED_AE: - if (p[1] > X25_MAX_DTE_FACIL_LEN) - break; + if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1) + return 0; dte_facs->called_len = p[2]; memcpy(dte_facs->called_ae, &p[3], p[1] - 1); *vc_fac_mask |= X25_MASK_CALLED_AE; break; default: printk(KERN_DEBUG "X.25: unknown facility %02X," - "length %d, values %02X, %02X, " - "%02X, %02X\n", - p[0], p[1], p[2], p[3], p[4], p[5]); + "length %d\n", p[0], p[1]); break; } len -= p[1] + 2; diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 63178961efac..f729f022be69 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c @@ -119,6 +119,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp &x25->vc_facil_mask); if (len > 0) skb_pull(skb, len); + else + return -1; /* * Copy any Call User Data. 
*/ diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 73e7b954ad28..b25c6463c3e9 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c @@ -394,6 +394,7 @@ void __exit x25_link_free(void) list_for_each_safe(entry, tmp, &x25_neigh_list) { nb = list_entry(entry, struct x25_neigh, node); __x25_remove_neigh(nb); + dev_put(nb->dev); } write_unlock_bh(&x25_neigh_list_lock); } diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c index a2023ec52329..1e98bc0fe0a5 100644 --- a/net/xfrm/xfrm_hash.c +++ b/net/xfrm/xfrm_hash.c @@ -19,7 +19,7 @@ struct hlist_head *xfrm_hash_alloc(unsigned int sz) if (sz <= PAGE_SIZE) n = kzalloc(sz, GFP_KERNEL); else if (hashdist) - n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL); + n = vzalloc(sz); else n = (struct hlist_head *) __get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index eb96ce52f178..220ebc05c7af 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -1268,7 +1268,7 @@ struct xfrm_state * xfrm_state_migrate(struct xfrm_state *x, return xc; error: - kfree(xc); + xfrm_state_put(xc); return NULL; } EXPORT_SYMBOL(xfrm_state_migrate); |
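A few of the changes in this merge repeat the same defensive patterns across subsystems; the notes below illustrate them with small, self-contained userspace sketches. None of this is kernel code: helper names, limits and values are invented for illustration.

The proc.c, sysctl_net_ipv4.c, tcp.c, udp.c and sctp hunks widen sysctl_tcp_mem/udp_mem/sctp_mem to long[3], switch tcp_memory_allocated and friends to atomic_long_t, and move the sysctls to proc_doulongvec_minmax, because page counts on large-memory machines no longer fit in 32 bits. A toy accounting counter in the same shape, using C11 _Atomic long (the limit values are made up):

#include <stdatomic.h>
#include <stdio.h>

/* Limits and usage are kept in long, not int, so the page counts survive
 * machines with more than 2^31 pages (the reason for the atomic_long_t switch). */
static long mem_limit[3] = { 1L << 20, 1L << 21, 1L << 22 };
static _Atomic long memory_allocated;

static int charge_pages(long pages)
{
	long now = atomic_fetch_add(&memory_allocated, pages) + pages;

	if (now > mem_limit[2]) {		/* over the hard limit: back out */
		atomic_fetch_sub(&memory_allocated, pages);
		return -1;
	}
	return now > mem_limit[0];		/* 1 => under memory pressure */
}

int main(void)
{
	printf("charge: %d, allocated: %ld\n",
	       charge_pages(4096), atomic_load(&memory_allocated));
	return 0;
}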
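tcp_output.c now snapshots sysctl_tcp_cookie_size and sysctl_tcp_tso_win_divisor into locals via ACCESS_ONCE(), so a tunable rewritten from /proc mid-function cannot make the range checks disagree with the value actually used. A rough userspace analogue of that single-read pattern, using a C11 relaxed load (the tunable and function names here are stand-ins):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for a sysctl another thread may rewrite at any moment. */
static _Atomic int tso_win_divisor = 3;

/* Read the shared tunable exactly once, then use only the local copy,
 * so the "is it zero?" test and the division see the same value. */
static uint32_t chunk_for_window(uint32_t window)
{
	int divisor = atomic_load_explicit(&tso_win_divisor,
					   memory_order_relaxed);

	if (divisor <= 0)
		return window;
	return window / (uint32_t)divisor;
}

int main(void)
{
	printf("chunk = %u\n", chunk_for_window(60000));
	return 0;
}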
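The addrconf.c hunks drop the old TIME_DELTA() arithmetic in favour of a cstamp_delta() helper that turns a jiffies timestamp (offset by INITIAL_JIFFIES) into hundredths of a second with a single multiply and divide, and report the router-solicit and probe intervals via jiffies_to_msecs(). A minimal sketch of the conversion, assuming HZ is 250 and simplifying INITIAL_JIFFIES to zero:

#include <stdio.h>

#define HZ 250			/* assumed tick rate for the example */
#define INITIAL_JIFFIES 0UL	/* simplified; the kernel's is non-zero */

/* Hundredths of a second represented by a jiffies timestamp. */
static unsigned int cstamp_delta(unsigned long cstamp)
{
	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
}

int main(void)
{
	printf("%u centiseconds\n", cstamp_delta(2500));	/* 10 s at HZ=250 */
	return 0;
}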
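The fragment-overlap checks in net/ipv6/reassembly.c and nf_conntrack_reasm.c change "(prev_end) - offset > 0" into "prev_end > offset", and packet_recvmsg() stops subtracting the vnet header length from len before testing the result. Both fix the same unsigned-arithmetic trap: a subtraction of unsigned values never goes negative, so checking the difference afterwards proves nothing. A small demonstration with invented values:

#include <stdio.h>
#include <stddef.h>

/* Broken: with unsigned operands, (a - b) > 0 is true whenever a != b,
 * because the subtraction wraps instead of going negative. */
static int overlaps_broken(unsigned int prev_end, unsigned int offset)
{
	return (prev_end - offset) > 0;
}

/* Fixed: compare directly, as the reassembly patches now do. */
static int overlaps_fixed(unsigned int prev_end, unsigned int offset)
{
	return prev_end > offset;
}

/* Same idea for length handling: test before subtracting from a size_t. */
static int strip_header(size_t *len, size_t hdr_len)
{
	if (*len < hdr_len)		/* would underflow below */
		return -1;
	*len -= hdr_len;
	return 0;
}

int main(void)
{
	/* prev_end == 100, new fragment starts at offset 200: no overlap. */
	printf("broken: %d  fixed: %d\n",
	       overlaps_broken(100, 200), overlaps_fixed(100, 200));

	size_t len = 10;
	printf("strip 20 from 10: %d (len still %zu)\n",
	       strip_header(&len, 20), len);
	return 0;
}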
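ieee80211_subif_start_xmit() now calls skb_copy() whenever skb_shared() reports another holder of the buffer, since the function is about to rewrite headers in place, and it recomputes nh_pos/h_pos only after the possible copy. The same copy-before-modify rule applies to any refcounted buffer; here it is sketched with a toy reference count (struct and helper names are hypothetical):

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct buf {
	int  users;		/* crude stand-in for skb->users */
	char data[64];
};

static int buf_shared(const struct buf *b)
{
	return b->users > 1;
}

/* If someone else still references the buffer, work on a private copy,
 * mirroring the skb_shared()/skb_copy() dance before header rewriting. */
static struct buf *buf_make_writable(struct buf *b)
{
	if (!buf_shared(b))
		return b;

	struct buf *copy = malloc(sizeof(*copy));
	if (!copy)
		return NULL;
	memcpy(copy, b, sizeof(*copy));
	copy->users = 1;
	b->users--;		/* drop our reference on the shared original */
	return copy;
}

int main(void)
{
	struct buf *orig = calloc(1, sizeof(*orig));
	if (!orig)
		return 1;
	orig->users = 2;			/* someone else holds it too */
	strcpy(orig->data, "payload");

	struct buf *mine = buf_make_writable(orig);
	strcpy(mine->data, "rewritten header + payload");

	printf("orig: %s\nmine: %s\n", orig->data, mine->data);
	free(mine);
	free(orig);
	return 0;
}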
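nf_conntrack_l4proto_register() gains an smp_wmb() between initializing proto_array[] and storing the pointer into nf_ct_protos[], so lockless readers that observe the new pointer also observe initialized slots. Userspace gets the equivalent ordering from a release store paired with an acquire load; a hedged C11 sketch of publish-after-initialize:

#include <stdatomic.h>
#include <stdio.h>

#define NPROTO 256

struct proto { int id; };

static struct proto generic = { .id = -1 };
static struct proto *table_storage[NPROTO];
static struct proto **_Atomic proto_table;	/* NULL until published */

/* Writer: fill every slot first, then publish the pointer with release
 * semantics -- the analogue of "smp_wmb(); nf_ct_protos[x] = proto_array". */
static void publish_table(void)
{
	for (int i = 0; i < NPROTO; i++)
		table_storage[i] = &generic;
	atomic_store_explicit(&proto_table, table_storage,
			      memory_order_release);
}

/* Lockless reader: the acquire load pairs with the release store, so a
 * non-NULL table is guaranteed to be fully initialized. */
static struct proto *proto_find(unsigned int protonum)
{
	struct proto **tbl = atomic_load_explicit(&proto_table,
						  memory_order_acquire);
	if (!tbl || protonum >= NPROTO)
		return &generic;
	return tbl[protonum];
}

int main(void)
{
	publish_table();
	printf("proto 6 -> id %d\n", proto_find(6)->id);
	return 0;
}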
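The RDS change copies the whole user iovec array in one pass and makes rds_rdma_pages() accumulate page counts into a signed int, returning an error if the running total ever goes negative; the comment notes that each entry is bounded, so the sum must pass through negative before it can wrap further. Outside the kernel, which builds with wrapping signed arithmetic, the portable form of that guard is an explicit INT_MAX check, as in this sketch (the per-vector struct is a stand-in for rds_pages_in_vec()):

#include <limits.h>
#include <stdio.h>

struct iovec_pages { unsigned int nr_pages; };

/* Sum per-vector page counts, refusing zero-sized entries and totals that
 * would overflow an int -- the failure rds_rdma_pages() guards against. */
static int total_pages(const struct iovec_pages *v, int nr)
{
	int tot = 0;

	for (int i = 0; i < nr; i++) {
		if (v[i].nr_pages == 0)
			return -1;			/* -EINVAL in the kernel */
		if (v[i].nr_pages > (unsigned int)(INT_MAX - tot))
			return -1;			/* would overflow */
		tot += (int)v[i].nr_pages;
	}
	return tot;
}

int main(void)
{
	struct iovec_pages ok[]   = { { 3 }, { 512 } };
	struct iovec_pages huge[] = { { INT_MAX }, { INT_MAX } };

	printf("ok:   %d\n", total_pages(ok, 2));
	printf("huge: %d\n", total_pages(huge, 2));
	return 0;
}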
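ax25_getname(), tipc's get_name(), packet_getname() and ip6_tables' get_info() all gain memset()s or explicit zeroing of the structures they hand back, so padding and uninitialized stack bytes never leak to userspace. The habit looks like this in a userspace program that writes a struct out verbatim (the reply layout here is made up for the example):

#include <string.h>
#include <stdio.h>
#include <unistd.h>

/* Hypothetical reply structure; the short next to the char array and the
 * trailing long leave padding the compiler will not clear for us. */
struct name_reply {
	unsigned short family;
	char           ifname[14];
	long           ifindex;
};

static void fill_reply(struct name_reply *r, const char *name, long idx)
{
	memset(r, 0, sizeof(*r));	/* clear padding and unused bytes first */
	r->family = 17;
	strncpy(r->ifname, name, sizeof(r->ifname) - 1);
	r->ifindex = idx;
}

int main(void)
{
	struct name_reply r;

	fill_reply(&r, "eth0", 2);
	/* Every byte copied out is now either real data or an explicit zero. */
	return write(STDOUT_FILENO, &r, sizeof(r)) == sizeof(r) ? 0 : 1;
}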
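The af_unix.c changes track a per-socket recursion_level: unix_attach_fds() finds the deepest level among any unix sockets carried in the SCM_RIGHTS list, refuses the send with -ETOOMANYREFS beyond MAX_RECURSION_LEVEL, and the receiving socket inherits one level more, so chains of sockets queued inside sockets stay bounded; wait_for_unix_gc() can additionally force a collection when too many fds are in flight. A toy model of just the level bookkeeping, not the socket code itself:

#include <stdio.h>

#define MAX_RECURSION_LEVEL 4

struct toy_sock { int recursion_level; };

/* Model of unix_attach_fds(): the message's level is one more than the
 * deepest socket being passed inside it; too-deep chains are rejected. */
static int attach_level(struct toy_sock **passed, int n)
{
	int max_level = 0;

	for (int i = 0; i < n; i++)
		if (passed[i] && passed[i]->recursion_level > max_level)
			max_level = passed[i]->recursion_level;

	if (max_level > MAX_RECURSION_LEVEL)
		return -1;			/* -ETOOMANYREFS */
	return max_level + 1;			/* level the receiver inherits */
}

int main(void)
{
	struct toy_sock a = { 0 }, b = { 4 }, c = { 5 };
	struct toy_sock *shallow[] = { &a, &b };
	struct toy_sock *deep[]    = { &c };

	printf("shallow: %d\n", attach_level(shallow, 2));
	printf("deep:    %d\n", attach_level(deep, 1));
	return 0;
}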
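x25_parse_facilities() now verifies that enough bytes remain before decoding each facility class and rejects Class D entries whose embedded length is out of range, and x25_state1_machine() treats a parse failure as fatal instead of pressing on. The general shape of that defence is a bounds-checked TLV walk; a sketch with invented record types:

#include <stdio.h>
#include <stddef.h>

/* Walk "type, len, value..." records, refusing to read beyond 'len' bytes,
 * in the spirit of the x25_parse_facilities() length checks. */
static int parse_tlv(const unsigned char *p, size_t len)
{
	while (len >= 2) {
		unsigned char type = p[0];
		unsigned char vlen = p[1];

		if (vlen + 2u > len)	/* record claims more than we have */
			return -1;
		printf("type %u, %u value bytes\n", type, vlen);
		p += vlen + 2;
		len -= vlen + 2;
	}
	return len == 0 ? 0 : -1;	/* trailing garbage is an error too */
}

int main(void)
{
	const unsigned char good[] = { 0x01, 2, 0xaa, 0xbb, 0x02, 1, 0xcc };
	const unsigned char bad[]  = { 0x01, 5, 0xaa };	/* claims 5, has 1 */

	printf("good: %d\n", parse_tlv(good, sizeof(good)));
	printf("bad:  %d\n", parse_tlv(bad, sizeof(bad)));
	return 0;
}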