author		Daniel Borkmann <daniel@iogearbox.net>	2019-05-29 14:31:06 +0300
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-05-29 14:31:06 +0300
commit		10b3c44131982e0aa889016890ada36a514a58ca (patch)
tree		d0cd9d846507593f603b06093730cf1c9c6a18c3
parent		bd95e678e0f6e18351ecdc147ca819145db9ed7b (diff)
parent		c25d60c12534ada10312a43f1c584fa7384aa2e7 (diff)
download	linux-10b3c44131982e0aa889016890ada36a514a58ca.tar.xz
Merge branch 'bpf-subreg-tests'
Jiong Wang says:
====================
JIT back-ends need to guarantee that the high 32 bits are cleared whenever
an eBPF insn writes only a low 32-bit sub-register. It is possible that some
JIT back-ends have failed to do this and are silently generating wrong
images. This set completes the unit tests, so such bugs can be exposed in
the JITs.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
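
For readers unfamiliar with the requirement being tested: on a 64-bit host,
a BPF_ALU32 instruction must behave as if the operation were computed in 32
bits and the result zero-extended into the full 64-bit register. The
following minimal C sketch (not part of the commit; plain C variables stand
in for eBPF registers) models the pattern every test below uses: dirty the
high 32 bits, perform the 32-bit op, shift right by 32, and expect zero.

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
            uint64_t r0 = 0x100000000ULL;   /* high 32 bits deliberately set */
            uint64_t r1 = 1;

            /* add32 r0, r1: compute in 32 bits, then zero-extend to 64 bits */
            r0 = (uint32_t)((uint32_t)r0 + (uint32_t)r1);

            r0 >>= 32;          /* stale high bits from a buggy JIT would survive */
            assert(r0 == 0);    /* mirrors .retval = 0 in the selftests below */
            return 0;
    }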
-rw-r--r--	tools/testing/selftests/bpf/verifier/basic_instr.c	 39
-rw-r--r--	tools/testing/selftests/bpf/verifier/subreg.c		533
2 files changed, 533 insertions(+), 39 deletions(-)
diff --git a/tools/testing/selftests/bpf/verifier/basic_instr.c b/tools/testing/selftests/bpf/verifier/basic_instr.c
index 4d844089938e..ed91a7b9a456 100644
--- a/tools/testing/selftests/bpf/verifier/basic_instr.c
+++ b/tools/testing/selftests/bpf/verifier/basic_instr.c
@@ -132,42 +132,3 @@
 	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	.result = ACCEPT,
 },
-{
-	"and32 reg zero extend check",
-	.insns = {
-	BPF_MOV64_IMM(BPF_REG_0, -1),
-	BPF_MOV64_IMM(BPF_REG_2, -2),
-	BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_2),
-	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
-	BPF_EXIT_INSN(),
-	},
-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-	.result = ACCEPT,
-	.retval = 0,
-},
-{
-	"or32 reg zero extend check",
-	.insns = {
-	BPF_MOV64_IMM(BPF_REG_0, -1),
-	BPF_MOV64_IMM(BPF_REG_2, -2),
-	BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_2),
-	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
-	BPF_EXIT_INSN(),
-	},
-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-	.result = ACCEPT,
-	.retval = 0,
-},
-{
-	"xor32 reg zero extend check",
-	.insns = {
-	BPF_MOV64_IMM(BPF_REG_0, -1),
-	BPF_MOV64_IMM(BPF_REG_2, 0),
-	BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_2),
-	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
-	BPF_EXIT_INSN(),
-	},
-	.prog_type = BPF_PROG_TYPE_SCHED_CLS,
-	.result = ACCEPT,
-	.retval = 0,
-},
diff --git a/tools/testing/selftests/bpf/verifier/subreg.c b/tools/testing/selftests/bpf/verifier/subreg.c
new file mode 100644
index 000000000000..4c4133c80440
--- /dev/null
+++ b/tools/testing/selftests/bpf/verifier/subreg.c
@@ -0,0 +1,533 @@
+/* This file contains sub-register zero extension checks for insns defining
+ * sub-registers, meaning:
+ *   - All insns under BPF_ALU class. Their BPF_ALU32 variants or narrow width
+ *     forms (BPF_END) could define sub-registers.
+ *   - Narrow direct loads, BPF_B/H/W | BPF_LDX.
+ *   - BPF_LD is not exposed to JIT back-ends, so no need for testing.
+ *
+ * "get_prandom_u32" is used to initialize low 32-bit of some registers to
+ * prevent potential optimizations done by verifier or JIT back-ends which could
+ * optimize register back into constant when range info shows one register is a
+ * constant.
+ */
+{
+	"add32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+	BPF_ALU32_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"add32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	/* An insn could have no effect on the low 32-bit, for example:
+	 *   a = a + 0
+	 *   a = a | 0
+	 *   a = a & -1
+	 * But, they should still zero high 32-bit.
+	 */
+	BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_ADD, BPF_REG_0, -2),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"sub32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
+	BPF_ALU32_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"sub32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_SUB, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mul32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+	BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mul32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_MUL, BPF_REG_0, -1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"div32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_IMM(BPF_REG_0, -1),
+	BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"div32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_DIV, BPF_REG_0, 2),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"or32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000001ULL),
+	BPF_ALU32_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"or32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_OR, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"and32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x1ffffffffULL),
+	BPF_ALU32_REG(BPF_AND, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"and32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_AND, BPF_REG_0, -2),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"lsh32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_1, 1),
+	BPF_ALU32_REG(BPF_LSH, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"lsh32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_LSH, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"rsh32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_1, 1),
+	BPF_ALU32_REG(BPF_RSH, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"rsh32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_RSH, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"neg32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_NEG, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mod32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_MOV64_IMM(BPF_REG_0, -1),
+	BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mod32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_MOD, BPF_REG_0, 2),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"xor32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+	BPF_ALU32_REG(BPF_XOR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"xor32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_XOR, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mov32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x100000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_1, BPF_REG_0),
+	BPF_LD_IMM64(BPF_REG_0, 0x100000000ULL),
+	BPF_MOV32_REG(BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"mov32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV32_IMM(BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV32_IMM(BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"arsh32 reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_MOV64_IMM(BPF_REG_1, 1),
+	BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"arsh32 imm zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 1),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"end16 (to_le) reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 16),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"end32 (to_le) reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_ENDIAN(BPF_TO_LE, BPF_REG_0, 32),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"end16 (to_be) reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 16),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"end32 (to_be) reg zero extend check",
+	.insns = {
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+	BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 32),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_6),
+	BPF_ENDIAN(BPF_TO_BE, BPF_REG_0, 32),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"ldx_b zero extend check",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+	BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_6, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"ldx_h zero extend check",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+	BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_6, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
+{
+	"ldx_w zero extend check",
+	.insns = {
+	BPF_MOV64_REG(BPF_REG_6, BPF_REG_10),
+	BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, -4),
+	BPF_ST_MEM(BPF_W, BPF_REG_6, 0, 0xfaceb00c),
+	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
+	BPF_LD_IMM64(BPF_REG_1, 0x1000000000ULL),
+	BPF_ALU64_REG(BPF_OR, BPF_REG_0, BPF_REG_1),
+	BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6, 0),
+	BPF_ALU64_IMM(BPF_RSH, BPF_REG_0, 32),
+	BPF_EXIT_INSN(),
+	},
+	.result = ACCEPT,
+	.retval = 0,
+},
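
A side note on the imm variants above: each one executes the 32-bit op
twice, once with an immediate that leaves the low 32 bits unchanged (e.g.
add 0, and -1) and once with an immediate that modifies them, then ORs the
two shifted results into a single return value. A minimal C sketch of that
accumulation, under the same truncate-then-zero-extend assumption as the
earlier example (alu32_add is a hypothetical stand-in, not kernel code):

    #include <stdint.h>
    #include <assert.h>

    /* Model of one imm-variant test: the 32-bit op is applied twice and the
     * high halves of both results are OR-ed into one pass/fail value
     * (0 == pass), mirroring the BPF_ALU64_REG(BPF_OR, R0, R6) accumulation.
     */
    static uint64_t alu32_add(uint64_t reg, int32_t imm)
    {
            /* compute in 32 bits, zero-extend on widening back to 64 bits */
            return (uint32_t)((uint32_t)reg + (uint32_t)imm);
    }

    int main(void)
    {
            uint64_t tainted = 0x1000000000ULL | 0xdeadbeef; /* dirty high half */
            uint64_t acc;

            acc  = alu32_add(tainted, 0)  >> 32;  /* no-op on the low half */
            acc |= alu32_add(tainted, -2) >> 32;  /* changes the low half  */
            assert(acc == 0);                     /* mirrors .retval = 0   */
            return 0;
    }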