author     Linus Torvalds <torvalds@linux-foundation.org>  2018-02-23 15:14:17 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-02-23 15:14:17 -0800
commit     9cb9c07d6b0c5fd97d83b8ab14d7e308ba4b612f (patch)
tree       b0c3add28508577157a1012474d38d785a84159c /kernel
parent     2eb02aa94f99ae2b94ab3c42d5d605128fd5c0c5 (diff)
parent     a5f7add332b4ea6d4b9480971b3b0f5e66466ae9 (diff)
download   blackbird-obmc-linux-9cb9c07d6b0c5fd97d83b8ab14d7e308ba4b612f.tar.gz
           blackbird-obmc-linux-9cb9c07d6b0c5fd97d83b8ab14d7e308ba4b612f.zip
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix TTL offset calculation in mac80211 mesh code, from Peter Oh.

 2) Fix races with procfs in ipt_CLUSTERIP, from Cong Wang.

 3) Memory leak fix in lpm_trie BPF map code, from Yonghong Song.

 4) Use GFP_KERNEL rather than GFP_ATOMIC in BPF cpumap allocations,
    from Jason Wang.

 5) Fix potential deadlocks in netfilter getsockopt() code paths, from
    Paolo Abeni.

 6) Netfilter stackpointer size checks really are needed to validate
    user input, from Florian Westphal.

 7) Missing timer init in x_tables, from Paolo Abeni.

 8) Don't use WQ_MEM_RECLAIM in mac80211 hwsim, from Johannes Berg.

 9) When an ibmvnic device is brought down then back up again, it can be
    sent queue entries from a previous session, handle this properly
    instead of crashing. From Thomas Falcon.

10) Fix TCP checksum on LRO buffers in mlx5e, from Gal Pressman.

11) When we are dumping filters in cls_api, the output SKB is empty, and
    the filter we are dumping is too large for the space in the SKB, we
    should return -EMSGSIZE like other netlink dump operations do.
    Otherwise userland has no signal that it needs to increase the size
    of its read buffer. From Roman Kapl.

12) Several XDP fixes for virtio_net, from Jesper Dangaard Brouer.

13) Module refcount leak in netlink when a dump start fails, from Jason
    Donenfeld.

14) Handle sub-optimal GSO sizes better in TCP BBR congestion control,
    from Eric Dumazet.

15) Releasing bpf per-cpu arraymaps can take a long time, add a
    conditional scheduling point. From Eric Dumazet.

16) Implement retpolines for tail calls in x64 and arm64 bpf JITs. From
    Daniel Borkmann.

17) Fix page leak in gianfar driver, from Andy Spencer.

18) Missed clearing of estimator scratch buffer, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (76 commits)
  net_sched: gen_estimator: fix broken estimators based on percpu stats
  gianfar: simplify FCS handling and fix memory leak
  ipv6 sit: work around bogus gcc-8 -Wrestrict warning
  macvlan: fix use-after-free in macvlan_common_newlink()
  bpf, arm64: fix out of bounds access in tail call
  bpf, x64: implement retpoline for tail call
  rxrpc: Fix send in rxrpc_send_data_packet()
  net: aquantia: Fix error handling in aq_pci_probe()
  bpf: fix rcu lockdep warning for lpm_trie map_free callback
  bpf: add schedule points in percpu arrays management
  regulatory: add NUL to request alpha2
  ibmvnic: Fix early release of login buffer
  net/smc9194: Remove bogus CONFIG_MAC reference
  net: ipv4: Set addr_type in hash_keys for forwarded case
  tcp_bbr: better deal with suboptimal GSO
  smsc75xx: fix smsc75xx_set_features()
  netlink: put module reference if dump start fails
  selftests/bpf/test_maps: exit child process without error in ENOMEM case
  selftests/bpf: update gitignore with test_libbpf_open
  selftests/bpf: tcpbpf_kern: use in6_* macros from glibc
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/arraymap.c    | 33
-rw-r--r--  kernel/bpf/core.c        |  2
-rw-r--r--  kernel/bpf/cpumap.c      |  2
-rw-r--r--  kernel/bpf/lpm_trie.c    | 14
-rw-r--r--  kernel/bpf/sockmap.c     |  3
-rw-r--r--  kernel/trace/bpf_trace.c |  2

6 files changed, 34 insertions(+), 22 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index b1f66480135b..14750e7c5ee4 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
 {
 	int i;
 
-	for (i = 0; i < array->map.max_entries; i++)
+	for (i = 0; i < array->map.max_entries; i++) {
 		free_percpu(array->pptrs[i]);
+		cond_resched();
+	}
 }
 
 static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 			return -ENOMEM;
 		}
 		array->pptrs[i] = ptr;
+		cond_resched();
 	}
 
 	return 0;
@@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
@@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 		array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
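The arraymap changes above do two things: the full map cost, including the per-cpu backing storage, is now computed up front with explicit u64 overflow checks and precharged against the RLIMIT_MEMLOCK budget, and cond_resched() is added to the per-entry alloc/free loops so that maps with millions of entries cannot hog the CPU. A minimal userspace sketch of the overflow-safe cost arithmetic (the DEMO_* constants and the function name are illustrative, not the kernel's definitions):

/* Sketch of the patch's cost accounting; returns pages to precharge,
 * or 0 on overflow (the kernel returns ERR_PTR(-ENOMEM) instead).
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SIZE  4096u
#define DEMO_PAGE_SHIFT 12

static uint64_t array_cost_pages(uint32_t max_entries, uint32_t elem_size,
				 int percpu, unsigned int ncpus)
{
	uint64_t cost = (uint64_t)max_entries * elem_size;

	if (cost >= UINT32_MAX - DEMO_PAGE_SIZE)
		return 0;
	if (percpu) {
		/* per-cpu storage multiplies the element cost by CPU count */
		cost += (uint64_t)max_entries * elem_size * ncpus;
		if (cost >= UINT32_MAX - DEMO_PAGE_SIZE)
			return 0;
	}
	/* round up to whole pages before precharging, as the patch does */
	return (cost + DEMO_PAGE_SIZE - 1) >> DEMO_PAGE_SHIFT;
}

int main(void)
{
	/* 1M-entry per-cpu array, 64-byte values, 8 CPUs */
	printf("%llu pages\n",
	       (unsigned long long)array_cost_pages(1u << 20, 64, 1, 8));
	return 0;
}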
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 29ca9208dcfa..d315b393abdd 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 	 * so always copy 'cnt' prog_ids to the user.
 	 * In a rare race the user will see zero prog_ids
 	 */
-	ids = kcalloc(cnt, sizeof(u32), GFP_USER);
+	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
 	if (!ids)
 		return -ENOMEM;
 	rcu_read_lock();
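The core.c change adds only __GFP_NOWARN: the allocation is sized by the number of attached programs, and since a failed request is already handled by returning -ENOMEM, the default allocation-failure backtrace would be pure log noise. A kernel-style sketch of the pattern (simplified; not the actual function body):

/* Kernel-context sketch (not a standalone program): when an allocation
 * is sized by a count that can grow large and the failure path already
 * returns -ENOMEM cleanly, __GFP_NOWARN keeps a failed request from
 * dumping an allocation-failure backtrace into the log.
 */
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>

static int copy_prog_ids(u32 cnt)
{
	u32 *ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);

	if (!ids)
		return -ENOMEM;	/* quiet failure, caller handles it */
	/* ... fill ids[] and copy them out to userspace ... */
	kfree(ids);
	return 0;
}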
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index fbfdada6caee..a4bb0b34375a 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
 static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 						       int map_id)
 {
-	gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
 	int numa, err;
 
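__cpu_map_entry_alloc() runs from the bpf(2) syscall path, i.e. process context where sleeping is allowed, so GFP_KERNEL is the appropriate mask here; GFP_ATOMIC is meant for callers that cannot sleep and would needlessly dip into atomic reserves. A kernel-style sketch of the distinction (the struct and helper names are illustrative):

/* Kernel-context sketch: pick the GFP mask from the calling context.
 * Syscall/process context may sleep, so a sleeping GFP_KERNEL
 * allocation is correct; GFP_ATOMIC is reserved for IRQ, softirq, or
 * spinlock-held contexts that cannot sleep.
 */
#include <linux/slab.h>
#include <linux/types.h>

struct demo_entry {
	u32 qsize;
	u32 cpu;
};

static struct demo_entry *demo_entry_alloc(u32 qsize, u32 cpu, int numa)
{
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;	/* process context */
	struct demo_entry *e;

	e = kzalloc_node(sizeof(*e), gfp, numa);
	if (!e)
		return NULL;
	e->qsize = qsize;
	e->cpu = cpu;
	return e;
}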
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 7b469d10d0e9..b4b5b81e7251 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
 	struct lpm_trie_node __rcu **slot;
 	struct lpm_trie_node *node;
 
-	raw_spin_lock(&trie->lock);
+	/* Wait for outstanding programs to complete
+	 * update/lookup/delete/get_next_key and free the trie.
+	 */
+	synchronize_rcu();
 
 	/* Always start at the root and walk down to a node that has no
 	 * children. Then free that node, nullify its reference in the parent
@@ -566,10 +569,9 @@ static void trie_free(struct bpf_map *map)
 	slot = &trie->root;
 
 	for (;;) {
-		node = rcu_dereference_protected(*slot,
-						 lockdep_is_held(&trie->lock));
+		node = rcu_dereference_protected(*slot, 1);
 		if (!node)
-			goto unlock;
+			goto out;
 
 		if (rcu_access_pointer(node->child[0])) {
 			slot = &node->child[0];
@@ -587,8 +589,8 @@ static void trie_free(struct bpf_map *map)
 		}
 	}
 
-unlock:
-	raw_spin_unlock(&trie->lock);
+out:
+	kfree(trie);
 }
 
 static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
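The trie_free() rework swaps the spinlock for RCU: once the map can no longer be reached by new readers, a single synchronize_rcu() waits out every in-flight lookup, after which the walk-and-free needs no locking at all. A kernel-style sketch of that teardown pattern (a single-child list stands in for the trie here, for brevity):

/* Kernel-context sketch of RCU teardown: after the grace period the
 * caller is the sole accessor, so rcu_dereference_protected(..., 1)
 * is justified and nodes can be freed without a lock.
 */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
	struct demo_node __rcu *next;
};

static void demo_free(struct demo_node __rcu **root)
{
	struct demo_node *n;

	synchronize_rcu();	/* wait for outstanding lookups */

	while ((n = rcu_dereference_protected(*root, 1)) != NULL) {
		struct demo_node *next = rcu_dereference_protected(n->next, 1);

		RCU_INIT_POINTER(*root, next);
		kfree(n);
	}
}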
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 48c33417d13c..a927e89dad6e 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
-	int err = -EINVAL;
 	u64 cost;
+	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+	err = -EINVAL;
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_stab;
 
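The sockmap change moves the -EINVAL assignment from the declaration to the line just above the overflow check it belongs to, so each goto free_stab path returns a deliberately chosen error rather than a stale default. A plain-C sketch of the convention (the names and the page constant are illustrative):

/* Sketch of the error-path convention the patch tightens: assign the
 * errno immediately before the check that can fail with it.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct stab {
	uint32_t max_entries;
	void **table;
};

static int stab_alloc(struct stab **out, uint32_t max_entries)
{
	struct stab *stab = calloc(1, sizeof(*stab));
	int err;

	if (!stab)
		return -ENOMEM;
	stab->max_entries = max_entries;

	err = -EINVAL;	/* set right next to the check it belongs to */
	if ((uint64_t)max_entries * sizeof(void *) >= UINT32_MAX - 4096)
		goto free_stab;

	err = -ENOMEM;
	stab->table = calloc(max_entries, sizeof(void *));
	if (!stab->table)
		goto free_stab;

	*out = stab;
	return 0;

free_stab:
	free(stab);
	return err;
}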
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index fc2838ac8b78..c0a9e310d715 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 		return -EINVAL;
 	if (copy_from_user(&query, uquery, sizeof(query)))
 		return -EFAULT;
+	if (query.ids_len > BPF_TRACE_MAX_PROGS)
+		return -E2BIG;
 
 	mutex_lock(&bpf_event_mutex);
 	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
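The bpf_trace fix bounds a user-supplied length before it can drive a later allocation: query.ids_len arrives via copy_from_user() and is now capped at BPF_TRACE_MAX_PROGS, returning -E2BIG for oversized requests. A kernel-style sketch of the check (the struct is abbreviated and the cap value here is an assumption, not the kernel's definition):

/* Kernel-context sketch: validate a userspace-controlled length
 * immediately after copying it in, before it sizes anything.
 */
#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_MAX_PROGS 64	/* illustrative cap */

struct demo_query {
	u32 ids_len;
};

static int demo_validate_query(void __user *uquery)
{
	struct demo_query query;

	if (copy_from_user(&query, uquery, sizeof(query)))
		return -EFAULT;
	/* reject oversized requests before ids_len drives an allocation */
	if (query.ids_len > DEMO_MAX_PROGS)
		return -E2BIG;
	return 0;
}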