diff options
-rw-r--r--  kernel/bpf/verifier.c                                            |  9
-rw-r--r--  tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c  | 54
2 files changed, 60 insertions(+), 3 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8e4f69918693..4fbacd2149cd 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -17678,12 +17678,15 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, } /* detect if R == 0 where R is returned from bpf_map_lookup_elem(). + * Also does the same detection for a register whose value is + * known to be 0. * NOTE: these optimizations below are related with pointer comparison * which will never be JMP32. */ - if (!is_jmp32 && BPF_SRC(insn->code) == BPF_K && - insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && - type_may_be_null(dst_reg->type)) { + if (!is_jmp32 && (opcode == BPF_JEQ || opcode == BPF_JNE) && + type_may_be_null(dst_reg->type) && + ((BPF_SRC(insn->code) == BPF_K && insn->imm == 0) || + (BPF_SRC(insn->code) == BPF_X && register_is_null(src_reg)))) { /* Mark all identical registers in each branch as either * safe or unknown depending R == 0 or R != 0 conditional. */ diff --git a/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c index bf16b00502f2..3d1e8de4390c 100644 --- a/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c +++ b/tools/testing/selftests/bpf/progs/verifier_jeq_infer_not_null.c @@ -210,4 +210,58 @@ l0_%=: /* return 0; */ \ : __clobber_all); } +/* Verified that we can detect the pointer as non_null when comparing with + * register with value 0. JEQ test case. 
+ */
+SEC("xdp")
+__success __log_level(2)
+/* to make sure the branch is not falsely predicted */
+__msg("r0 = *(u32 *)(r0 +0)")
+__msg("from 7 to 9")
+__naked void jeq_reg_reg_null_check(void)
+{
+	asm volatile ("					\
+	*(u32*)(r10 - 8) = 0;				\
+	r1 = %[map_xskmap] ll;				\
+	r2 = r10;					\
+	r2 += -8;					\
+	call %[bpf_map_lookup_elem];			\
+	r1 = 0;						\
+	if r0 == r1 goto 1f;				\
+	r0 = *(u32*)(r0 +0);				\
+1:	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_xskmap)
+	: __clobber_all);
+}
+
+/* Same as above but for JNE.
+ */
+SEC("xdp")
+__success __log_level(2)
+/* to make sure the branch is not falsely predicted */
+__msg("r0 = *(u32 *)(r0 +0)")
+__msg("from 7 to 9")
+__naked void jne_reg_reg_null_check(void)
+{
+	asm volatile ("					\
+	*(u32*)(r10 - 8) = 0;				\
+	r1 = %[map_xskmap] ll;				\
+	r2 = r10;					\
+	r2 += -8;					\
+	call %[bpf_map_lookup_elem];			\
+	r1 = 0;						\
+	if r0 != r1 goto 1f;				\
+	goto 2f;					\
+1:	r0 = *(u32*)(r0 +0);				\
+2:	r0 = 0;						\
+	exit;						\
+"	:
+	: __imm(bpf_map_lookup_elem),
+	  __imm_addr(map_xskmap)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
