author		David S. Miller <davem@davemloft.net>	2019-08-12 00:49:34 +0300
committer	David S. Miller <davem@davemloft.net>	2019-08-12 00:49:34 +0300
commit		9481382b36fe1dbdada9303ebd6b9dba915da989 (patch)
tree		704dfcc13581c9aae6fe2a4669d668b4b27207ac /arch
parent		57c722e932cfb82e9820bbaae1b1f7222ea97b52 (diff)
parent		4f7aafd78aeaf18a4f6dea9415df60e745c9dfa7 (diff)
download	linux-9481382b36fe1dbdada9303ebd6b9dba915da989.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:
====================
pull-request: bpf 2019-08-11
The following pull-request contains BPF updates for your *net* tree.
The main changes are:
1) x64 JIT code generation fix for backward-jumps to 1st insn, from Alexei.
2) Fix buggy multi-closing of BTF file descriptor in libbpf, from Andrii
   (a generic double-close guard is sketched after the sign-off below).
3) Fix libbpf_num_possible_cpus() to make it thread safe, from Takshak.
4) Fix bpftool to dump an error if pinning fails, from Jakub.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
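For context on item 2 of the summary: the usual way to avoid closing the same file
descriptor twice is to invalidate it immediately after the first close(). The snippet
below is only a minimal sketch of that guard pattern, not the actual libbpf change;
the struct and field names are made up for the example.

```c
/* Generic double-close guard, illustrative only (not the libbpf patch).
 * The struct and field names here are hypothetical.
 */
#include <fcntl.h>
#include <unistd.h>

struct obj_state {
	int btf_fd;			/* -1 when no descriptor is held */
};

static void obj_close_btf(struct obj_state *s)
{
	if (s->btf_fd >= 0) {
		close(s->btf_fd);
		s->btf_fd = -1;		/* a second call becomes a harmless no-op */
	}
}

int main(void)
{
	struct obj_state s = { .btf_fd = open("/dev/null", O_RDONLY) };

	obj_close_btf(&s);
	obj_close_btf(&s);		/* safe: fd was invalidated above */
	return 0;
}
```

Invalidating the stored descriptor, rather than relying on callers to close it only
once, keeps later cleanup paths from handing an already-recycled fd back to close().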
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/net/bpf_jit_comp.c | 9 |
1 file changed, 5 insertions, 4 deletions
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index eaaed5bfc4a4..991549a1c5f3 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -390,8 +390,9 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
 
 	emit_prologue(&prog, bpf_prog->aux->stack_depth,
 		      bpf_prog_was_classic(bpf_prog));
+	addrs[0] = prog - temp;
 
-	for (i = 0; i < insn_cnt; i++, insn++) {
+	for (i = 1; i <= insn_cnt; i++, insn++) {
 		const s32 imm32 = insn->imm;
 		u32 dst_reg = insn->dst_reg;
 		u32 src_reg = insn->src_reg;
@@ -1105,7 +1106,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		extra_pass = true;
 		goto skip_init_addrs;
 	}
-	addrs = kmalloc_array(prog->len, sizeof(*addrs), GFP_KERNEL);
+	addrs = kmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
 		goto out_addrs;
@@ -1115,7 +1116,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	 * Before first pass, make a rough estimation of addrs[]
 	 * each BPF instruction is translated to less than 64 bytes
 	 */
-	for (proglen = 0, i = 0; i < prog->len; i++) {
+	for (proglen = 0, i = 0; i <= prog->len; i++) {
 		proglen += 64;
 		addrs[i] = proglen;
 	}
@@ -1180,7 +1181,7 @@ out_image:
 
 	if (!image || !prog->is_func || extra_pass) {
 		if (image)
-			bpf_prog_fill_jited_linfo(prog, addrs);
+			bpf_prog_fill_jited_linfo(prog, addrs + 1);
 out_addrs:
 		kfree(addrs);
 		kfree(jit_data);
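The hunks above make addrs[] one entry longer and effectively one-based: addrs[0]
records where the prologue ends and addrs[i] where the i-th BPF instruction's emitted
code ends, so a backward jump whose target is the very first instruction can still
look up a valid start address. The sketch below only illustrates that indexing and
offset arithmetic with made-up names and byte counts; it is not the kernel's JIT code.

```c
/* Illustration of jump-offset lookup with a one-based addrs[] table.
 * All names and numbers are invented for the example; this is not the
 * kernel JIT, just the indexing idea behind the fix above.
 */
#include <stdio.h>

/* addrs[0] = offset where the prologue ends (start of BPF insn 1)
 * addrs[i] = offset where the i-th BPF insn's emitted code ends
 *            (and therefore where insn i + 1 starts)
 */
static int jmp_displacement(const int *addrs, int i, int bpf_off)
{
	/* The jump is encoded relative to the end of insn i's code
	 * (addrs[i]); its BPF target is insn i + bpf_off + 1, whose
	 * start is addrs[i + bpf_off]. A backward jump to the first
	 * insn therefore reads addrs[0], which only exists because
	 * the table is shifted by one and allocated with len + 1.
	 */
	return addrs[i + bpf_off] - addrs[i];
}

int main(void)
{
	/* 16-byte prologue, then three insns ending at 20, 28 and 32 */
	int addrs[] = { 16, 20, 28, 32 };

	/* insn 3 with BPF offset -3 targets the first instruction */
	printf("%d\n", jmp_displacement(addrs, 3, -3));	/* prints -16 */
	return 0;
}
```

With the old zero-based layout, the same lookup for a jump back to the first
instruction would have needed an entry in front of the array, which is exactly the
case the patch handles by storing the prologue length in addrs[0].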