author    Daniel Borkmann <daniel@iogearbox.net>      2016-12-18 01:52:57 +0100
committer David S. Miller <davem@davemloft.net>       2016-12-17 21:27:44 -0500
commit    aafe6ae9cee32df85eb5e8bb6dd1d918e6807b09 (patch)
tree      4aa9958d1ddf599f4ed32d491f8ab217c9747056 /kernel
parent    40e972ab652f3e9b84a8f24f517345b460962c29 (diff)
bpf: dynamically allocate digest scratch buffer
Geert rightfully complained that 7bd509e311f4 ("bpf: add prog_digest and
expose it via fdinfo/netlink") added a too large allocation of the variable
'raw' in the bss section, which should instead be done dynamically:

  # ./scripts/bloat-o-meter kernel/bpf/core.o.1 kernel/bpf/core.o.2
  add/remove: 3/0 grow/shrink: 0/0 up/down: 33291/0 (33291)
  function                                     old     new   delta
  raw                                            -   32832  +32832
  [...]

Since this is only relevant during the program creation path, which can be
considered slow-path anyway, let's allocate it dynamically and avoid the
implicit dependency on the verifier mutex. Move bpf_prog_calc_digest() to
the beginning of replace_map_fd_with_map_ptr(); error handling stays
straightforward.

Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
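The two helpers the patch switches to, bpf_prog_insn_size() and
bpf_prog_digest_scratch_size(), live outside the kernel/ subtree (presumably
in include/linux/filter.h) and therefore do not appear in the diffstat below.
A minimal sketch of what they are assumed to compute, mirroring the removed
SHA_BPF_RAW_SIZE macro but sized per program instead of for MAX_BPF_SIZE:

	/* Sketch only: assumed definitions of the helpers used by this patch. */
	static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
	{
		return prog->len * sizeof(struct bpf_insn);
	}

	static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
	{
		/* Instruction bytes plus the 0x80 padding byte and the trailing
		 * 64-bit bit-length, rounded up to a whole SHA-1 message block.
		 */
		return round_up(bpf_prog_insn_size(prog) +
				sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
	}

For example, a 3-instruction program needs 3 * 8 + 1 + 8 = 33 bytes of
scratch space, rounded up to a single 64-byte SHA_MESSAGE_BYTES block,
whereas the old static buffer always reserved room for BPF_MAXINSNS
instructions, i.e. the 32832-byte 'raw' object reported by bloat-o-meter
above.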
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/bpf/core.c      27
-rw-r--r--  kernel/bpf/syscall.c    2
-rw-r--r--  kernel/bpf/verifier.c   6
3 files changed, 21 insertions(+), 14 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 83e0d153b0b4..75c08b8068d6 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -136,28 +136,29 @@ void __bpf_prog_free(struct bpf_prog *fp)
 	vfree(fp);
 }
 
-#define SHA_BPF_RAW_SIZE \
-	round_up(MAX_BPF_SIZE + sizeof(__be64) + 1, SHA_MESSAGE_BYTES)
-
-/* Called under verifier mutex. */
-void bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_digest(struct bpf_prog *fp)
 {
 	const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
-	static u32 ws[SHA_WORKSPACE_WORDS];
-	static u8 raw[SHA_BPF_RAW_SIZE];
-	struct bpf_insn *dst = (void *)raw;
+	u32 raw_size = bpf_prog_digest_scratch_size(fp);
+	u32 ws[SHA_WORKSPACE_WORDS];
 	u32 i, bsize, psize, blocks;
+	struct bpf_insn *dst;
 	bool was_ld_map;
-	u8 *todo = raw;
+	u8 *raw, *todo;
 	__be32 *result;
 	__be64 *bits;
 
+	raw = vmalloc(raw_size);
+	if (!raw)
+		return -ENOMEM;
+
 	sha_init(fp->digest);
 	memset(ws, 0, sizeof(ws));
 
 	/* We need to take out the map fd for the digest calculation
 	 * since they are unstable from user space side.
 	 */
+	dst = (void *)raw;
 	for (i = 0, was_ld_map = false; i < fp->len; i++) {
 		dst[i] = fp->insnsi[i];
 		if (!was_ld_map &&
@@ -177,12 +178,13 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 		}
 	}
 
-	psize = fp->len * sizeof(struct bpf_insn);
-	memset(&raw[psize], 0, sizeof(raw) - psize);
+	psize = bpf_prog_insn_size(fp);
+	memset(&raw[psize], 0, raw_size - psize);
 	raw[psize++] = 0x80;
 
 	bsize  = round_up(psize, SHA_MESSAGE_BYTES);
 	blocks = bsize / SHA_MESSAGE_BYTES;
+	todo   = raw;
 	if (bsize - psize >= sizeof(__be64)) {
 		bits = (__be64 *)(todo + bsize - sizeof(__be64));
 	} else {
@@ -199,6 +201,9 @@ void bpf_prog_calc_digest(struct bpf_prog *fp)
 	result = (__force __be32 *)fp->digest;
 	for (i = 0; i < SHA_DIGEST_WORDS; i++)
 		result[i] = cpu_to_be32(fp->digest[i]);
+
+	vfree(raw);
+	return 0;
 }
 
 static bool bpf_is_jmp_and_has_target(const struct bpf_insn *insn)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4819ec9d95f6..35d674c1f12e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -811,7 +811,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 
 	err = -EFAULT;
 	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
-			   prog->len * sizeof(struct bpf_insn)) != 0)
+			   bpf_prog_insn_size(prog)) != 0)
 		goto free_prog;
 
 	prog->orig_prog = NULL;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 81e267bc4640..64b7b1abe087 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2931,6 +2931,10 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 	int insn_cnt = env->prog->len;
 	int i, j, err;
 
+	err = bpf_prog_calc_digest(env->prog);
+	if (err)
+		return err;
+
 	for (i = 0; i < insn_cnt; i++, insn++) {
 		if (BPF_CLASS(insn->code) == BPF_LDX &&
 		    (BPF_MODE(insn->code) != BPF_MEM || insn->imm != 0)) {
@@ -3178,8 +3182,6 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 		log_level = 0;
 	}
 
-	bpf_prog_calc_digest(env->prog);
-
 	ret = replace_map_fd_with_map_ptr(env);
 	if (ret < 0)
 		goto skip_full_check;