author    Nicolas Schichan <nschichan@freebox.fr>    2015-05-07 18:14:21 +0300
committer Zefan Li <lizefan@huawei.com>              2015-09-18 04:20:39 +0300
commit    48b33c5b1759561efb72db8ba729a5c2656e5a1d (patch)
tree      d818914493d0295f1c3435481d6e7cc1541bbe0d /arch
parent    f5d35596f9a9e21217436a7892d3ed0366cf0997 (diff)
ARM: net: delegate filter to kernel interpreter when imm_offset() return value can't fit into 12bits.
commit 0b59d8806a31bb0267b3a461e8fef20c727bdbf6 upstream.

The ARM JIT code emits "ldr rX, [pc, #offset]" to access the literal pool. The maximum #offset value is 4095, and if the generated code is too large, the #offset value can overflow and no longer point to the expected slot in the literal pool. Additionally, when the overflow occurs, bits of the overflow can end up changing the destination register of the ldr instruction.

Fix that by detecting the overflow in imm_offset() and setting a flag that is checked for each BPF instruction converted in build_body(). As of now the overflow can only be detected in the second pass. As a result the second build_body() call can now fail, so add the corresponding cleanup code for that case.

Using multiple literal pools in the JITed code would require lots of intrusive changes to the JIT code (which would better be done as a feature instead of a fix); just delegating to the kernel BPF interpreter in that case is a more straightforward, minimal fix that is easy to backport.

Fixes: ddecdfcea0ae ("ARM: 7259/3: net: JIT compiler for packet filters")
Signed-off-by: Nicolas Schichan <nschichan@freebox.fr>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Zefan Li <lizefan@huawei.com>
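
To make the failure mode above concrete: in the ARM "ldr rt, [rn, #imm12]" encoding, the offset occupies bits [11:0] and the destination register occupies bits [15:12], so an unmasked offset above 4095 bleeds into the register field. The following standalone demo (not the kernel's JIT code; the encoder and function names are made up for illustration) shows both the overflow test the patch adds in imm_offset() and the register corruption it prevents:

/*
 * Standalone illustration only; not the kernel's JIT code.  The encoder
 * below is hand-rolled for the demo: cond=AL, LDR (immediate), P=1 U=1 W=0.
 */
#include <stdint.h>
#include <stdio.h>

#define ARM_PC 15

static uint32_t ldr_pc_imm(uint32_t rt, uint32_t offset)
{
	/* bits [15:12] = rt, bits [11:0] = offset; no masking, as in the bug */
	return 0xe5900000 | (ARM_PC << 16) | (rt << 12) | offset;
}

/* The same test the patch adds: anything outside the low 12 bits
 * cannot be encoded in the ldr immediate field. */
static int imm12_overflows(int32_t imm)
{
	return (imm & ~0xfff) != 0;
}

int main(void)
{
	uint32_t ok  = ldr_pc_imm(4, 4095);	/* largest encodable offset */
	uint32_t bad = ldr_pc_imm(4, 4096);	/* one past the 12-bit limit */

	printf("overflow(4095)=%d overflow(4096)=%d\n",
	       imm12_overflows(4095), imm12_overflows(4096));
	/* the corrupted encoding now names r5, not r4, as its destination */
	printf("rt(ok)=r%u rt(bad)=r%u\n", (ok >> 12) & 0xf, (bad >> 12) & 0xf);
	return 0;
}

Running it prints overflow(4095)=0 overflow(4096)=1 and rt(ok)=r4 rt(bad)=r5, which is exactly the silent corruption the flag-and-fallback approach avoids.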
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/net/bpf_jit_32.c  27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 62135849f48b..ad941453340a 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -52,6 +52,7 @@
#define SEEN_DATA (1 << (BPF_MEMWORDS + 3))
#define FLAG_NEED_X_RESET (1 << 0)
+#define FLAG_IMM_OVERFLOW (1 << 1)
struct jit_ctx {
const struct sk_filter *skf;
@@ -286,6 +287,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
/* PC in ARM mode == address of the instruction + 8 */
imm = offset - (8 + ctx->idx * 4);
+ if (imm & ~0xfff) {
+ /*
+ * literal pool is too far, signal it into flags. we
+ * can only detect it on the second pass unfortunately.
+ */
+ ctx->flags |= FLAG_IMM_OVERFLOW;
+ return 0;
+ }
+
return imm;
}
@@ -817,6 +827,14 @@ b_epilogue:
default:
return -1;
}
+
+ if (ctx->flags & FLAG_IMM_OVERFLOW)
+ /*
+ * this instruction generated an overflow when
+ * trying to access the literal pool, so
+ * delegate this filter to the kernel interpreter.
+ */
+ return -1;
}
/* compute offsets only during the first pass */
@@ -876,7 +894,14 @@ void bpf_jit_compile(struct sk_filter *fp)
ctx.idx = 0;
build_prologue(&ctx);
- build_body(&ctx);
+ if (build_body(&ctx) < 0) {
+#if __LINUX_ARM_ARCH__ < 7
+ if (ctx.imm_count)
+ kfree(ctx.imms);
+#endif
+ bpf_jit_binary_free(header);
+ goto out;
+ }
build_epilogue(&ctx);
flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
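
For context on the last hunk: when build_body() fails, bpf_jit_compile() now frees the allocated image and returns without installing a JITed function, so the filter keeps running through the kernel BPF interpreter that the caller installed beforehand. A rough user-space sketch of that caller-side design (hypothetical names, not the actual net/core/filter.c code, assuming the sk_filter-era behaviour where bpf_func defaults to the interpreter):

#include <stdio.h>

typedef unsigned int (*filter_fn)(const void *pkt);

/* stand-in for the kernel's interpreter entry point */
static unsigned int interpreter_run(const void *pkt) { (void)pkt; return 1; }

struct filter {
	filter_fn run;		/* starts out pointing at the interpreter */
};

/* stand-in for bpf_jit_compile(): only replaces ->run on success */
static void jit_compile(struct filter *f, int body_ok)
{
	if (!body_ok)
		return;		/* e.g. FLAG_IMM_OVERFLOW was raised */
	/* a real JIT would install its generated code here */
}

int main(void)
{
	struct filter f = { .run = interpreter_run };

	jit_compile(&f, 0);	/* simulate a failed build_body() */
	printf("filter runs via %s\n",
	       f.run == interpreter_run ? "interpreter" : "JIT");
	return 0;
}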