diff options
 include/linux/bpf_verifier.h                        |   4 +-
 kernel/bpf/verifier.c                               | 201 ++++++++++----------
 tools/testing/selftests/bpf/progs/verifier_bounds.c |  51 ++++---
 3 files changed, 148 insertions(+), 108 deletions(-)
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 090aa26d1c98..b129e0aaee20 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -837,7 +837,9 @@ struct bpf_verifier_env { u64 scratched_stack_slots; u64 prev_log_pos, prev_insn_print_pos; /* buffer used to temporary hold constants as scalar registers */ - struct bpf_reg_state fake_reg[2]; + struct bpf_reg_state fake_reg[1]; + /* buffers used to save updated reg states while simulating branches */ + struct bpf_reg_state true_reg1, true_reg2, false_reg1, false_reg2; /* buffer used to generate temporary string representations, * e.g., in reg_type_str() to generate reg_type string */ diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8c1cf2eb6cbb..a431b7d50e1b 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2788,8 +2788,13 @@ static void __reg_bound_offset(struct bpf_reg_state *reg) reg->var_off = tnum_or(tnum_clear_subreg(var64_off), var32_off); } +static bool range_bounds_violation(struct bpf_reg_state *reg); + static void reg_bounds_sync(struct bpf_reg_state *reg) { + /* If the input reg_state is invalid, we can exit early */ + if (range_bounds_violation(reg)) + return; /* We might have learned new bounds from the var_off. */ __update_reg_bounds(reg); /* We might have learned something about the sign bit. 
*/ @@ -2804,39 +2809,55 @@ static void reg_bounds_sync(struct bpf_reg_state *reg) __update_reg_bounds(reg); } +static bool range_bounds_violation(struct bpf_reg_state *reg) +{ + return (reg->umin_value > reg->umax_value || reg->smin_value > reg->smax_value || + reg->u32_min_value > reg->u32_max_value || + reg->s32_min_value > reg->s32_max_value); +} + +static bool const_tnum_range_mismatch(struct bpf_reg_state *reg) +{ + u64 uval = reg->var_off.value; + s64 sval = (s64)uval; + + if (!tnum_is_const(reg->var_off)) + return false; + + return reg->umin_value != uval || reg->umax_value != uval || + reg->smin_value != sval || reg->smax_value != sval; +} + +static bool const_tnum_range_mismatch_32(struct bpf_reg_state *reg) +{ + u32 uval32 = tnum_subreg(reg->var_off).value; + s32 sval32 = (s32)uval32; + + if (!tnum_subreg_is_const(reg->var_off)) + return false; + + return reg->u32_min_value != uval32 || reg->u32_max_value != uval32 || + reg->s32_min_value != sval32 || reg->s32_max_value != sval32; +} + static int reg_bounds_sanity_check(struct bpf_verifier_env *env, struct bpf_reg_state *reg, const char *ctx) { const char *msg; - if (reg->umin_value > reg->umax_value || - reg->smin_value > reg->smax_value || - reg->u32_min_value > reg->u32_max_value || - reg->s32_min_value > reg->s32_max_value) { - msg = "range bounds violation"; - goto out; + if (range_bounds_violation(reg)) { + msg = "range bounds violation"; + goto out; } - if (tnum_is_const(reg->var_off)) { - u64 uval = reg->var_off.value; - s64 sval = (s64)uval; - - if (reg->umin_value != uval || reg->umax_value != uval || - reg->smin_value != sval || reg->smax_value != sval) { - msg = "const tnum out of sync with range bounds"; - goto out; - } + if (const_tnum_range_mismatch(reg)) { + msg = "const tnum out of sync with range bounds"; + goto out; } - if (tnum_subreg_is_const(reg->var_off)) { - u32 uval32 = tnum_subreg(reg->var_off).value; - s32 sval32 = (s32)uval32; - - if (reg->u32_min_value != uval32 || 
reg->u32_max_value != uval32 || - reg->s32_min_value != sval32 || reg->s32_max_value != sval32) { - msg = "const subreg tnum out of sync with range bounds"; - goto out; - } + if (const_tnum_range_mismatch_32(reg)) { + msg = "const subreg tnum out of sync with range bounds"; + goto out; } return 0; @@ -16765,11 +16786,50 @@ static void find_good_pkt_pointers(struct bpf_verifier_state *vstate, })); } +static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, + u8 opcode, bool is_jmp32); +static u8 rev_opcode(u8 opcode); + +/* + * Learn more information about live branches by simulating refinement on both branches. + * regs_refine_cond_op() is sound, so producing ill-formed register bounds for the branch means + * that branch is dead. + */ +static int simulate_both_branches_taken(struct bpf_verifier_env *env, u8 opcode, bool is_jmp32) +{ + /* Fallthrough (FALSE) branch */ + regs_refine_cond_op(&env->false_reg1, &env->false_reg2, rev_opcode(opcode), is_jmp32); + reg_bounds_sync(&env->false_reg1); + reg_bounds_sync(&env->false_reg2); + /* + * If there is a range bounds violation in *any* of the abstract values in either + * reg_states in the FALSE branch (i.e. reg1, reg2), the FALSE branch must be dead. Only + * TRUE branch will be taken. + */ + if (range_bounds_violation(&env->false_reg1) || range_bounds_violation(&env->false_reg2)) + return 1; + + /* Jump (TRUE) branch */ + regs_refine_cond_op(&env->true_reg1, &env->true_reg2, opcode, is_jmp32); + reg_bounds_sync(&env->true_reg1); + reg_bounds_sync(&env->true_reg2); + /* + * If there is a range bounds violation in *any* of the abstract values in either + * reg_states in the TRUE branch (i.e. true_reg1, true_reg2), the TRUE branch must be dead. + * Only FALSE branch will be taken. + */ + if (range_bounds_violation(&env->true_reg1) || range_bounds_violation(&env->true_reg2)) + return 0; + + /* Both branches are possible, we can't determine which one will be taken. 
*/ + return -1; +} + /* * <reg1> <op> <reg2>, currently assuming reg2 is a constant */ -static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, - u8 opcode, bool is_jmp32) +static int is_scalar_branch_taken(struct bpf_verifier_env *env, struct bpf_reg_state *reg1, + struct bpf_reg_state *reg2, u8 opcode, bool is_jmp32) { struct tnum t1 = is_jmp32 ? tnum_subreg(reg1->var_off) : reg1->var_off; struct tnum t2 = is_jmp32 ? tnum_subreg(reg2->var_off) : reg2->var_off; @@ -16921,7 +16981,7 @@ static int is_scalar_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_sta break; } - return -1; + return simulate_both_branches_taken(env, opcode, is_jmp32); } static int flip_opcode(u32 opcode) @@ -16992,8 +17052,8 @@ static int is_pkt_ptr_branch_taken(struct bpf_reg_state *dst_reg, * -1 - unknown. Example: "if (reg1 < 5)" is unknown when register value * range [0,10] */ -static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg2, - u8 opcode, bool is_jmp32) +static int is_branch_taken(struct bpf_verifier_env *env, struct bpf_reg_state *reg1, + struct bpf_reg_state *reg2, u8 opcode, bool is_jmp32) { if (reg_is_pkt_pointer_any(reg1) && reg_is_pkt_pointer_any(reg2) && !is_jmp32) return is_pkt_ptr_branch_taken(reg1, reg2, opcode); @@ -17031,7 +17091,7 @@ static int is_branch_taken(struct bpf_reg_state *reg1, struct bpf_reg_state *reg } /* now deal with two scalars, but not necessarily constants */ - return is_scalar_branch_taken(reg1, reg2, opcode, is_jmp32); + return is_scalar_branch_taken(env, reg1, reg2, opcode, is_jmp32); } /* Opcode that corresponds to a *false* branch condition. 
@@ -17122,8 +17182,8 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state /* u32_min_value is not equal to 0xffffffff at this point, * because otherwise u32_max_value is 0xffffffff as well, * in such a case both reg1 and reg2 would be constants, - * jump would be predicted and reg_set_min_max() won't - * be called. + * jump would be predicted and regs_refine_cond_op() + * wouldn't be called. * * Same reasoning works for all {u,s}{min,max}{32,64} cases * below. @@ -17230,49 +17290,15 @@ static void regs_refine_cond_op(struct bpf_reg_state *reg1, struct bpf_reg_state } } -/* Adjusts the register min/max values in the case that the dst_reg and - * src_reg are both SCALAR_VALUE registers (or we are simply doing a BPF_K - * check, in which case we have a fake SCALAR_VALUE representing insn->imm). - * Technically we can do similar adjustments for pointers to the same object, - * but we don't support that right now. - */ -static int reg_set_min_max(struct bpf_verifier_env *env, - struct bpf_reg_state *true_reg1, - struct bpf_reg_state *true_reg2, - struct bpf_reg_state *false_reg1, - struct bpf_reg_state *false_reg2, - u8 opcode, bool is_jmp32) +/* Check for invariant violations on the registers for both branches of a condition */ +static int regs_bounds_sanity_check_branches(struct bpf_verifier_env *env) { int err; - /* If either register is a pointer, we can't learn anything about its - * variable offset from the compare (unless they were a pointer into - * the same object, but we don't bother with that). - */ - if (false_reg1->type != SCALAR_VALUE || false_reg2->type != SCALAR_VALUE) - return 0; - - /* We compute branch direction for same SCALAR_VALUE registers in - * is_scalar_branch_taken(). For unknown branch directions (e.g., BPF_JSET) - * on the same registers, we don't need to adjust the min/max values. 
- */ - if (false_reg1 == false_reg2) - return 0; - - /* fallthrough (FALSE) branch */ - regs_refine_cond_op(false_reg1, false_reg2, rev_opcode(opcode), is_jmp32); - reg_bounds_sync(false_reg1); - reg_bounds_sync(false_reg2); - - /* jump (TRUE) branch */ - regs_refine_cond_op(true_reg1, true_reg2, opcode, is_jmp32); - reg_bounds_sync(true_reg1); - reg_bounds_sync(true_reg2); - - err = reg_bounds_sanity_check(env, true_reg1, "true_reg1"); - err = err ?: reg_bounds_sanity_check(env, true_reg2, "true_reg2"); - err = err ?: reg_bounds_sanity_check(env, false_reg1, "false_reg1"); - err = err ?: reg_bounds_sanity_check(env, false_reg2, "false_reg2"); + err = reg_bounds_sanity_check(env, &env->true_reg1, "true_reg1"); + err = err ?: reg_bounds_sanity_check(env, &env->true_reg2, "true_reg2"); + err = err ?: reg_bounds_sanity_check(env, &env->false_reg1, "false_reg1"); + err = err ?: reg_bounds_sanity_check(env, &env->false_reg2, "false_reg2"); return err; } @@ -17656,7 +17682,11 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, } is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; - pred = is_branch_taken(dst_reg, src_reg, opcode, is_jmp32); + copy_register_state(&env->false_reg1, dst_reg); + copy_register_state(&env->false_reg2, src_reg); + copy_register_state(&env->true_reg1, dst_reg); + copy_register_state(&env->true_reg2, src_reg); + pred = is_branch_taken(env, dst_reg, src_reg, opcode, is_jmp32); if (pred >= 0) { /* If we get here with a dst_reg pointer type it is because * above is_branch_taken() special cased the 0 comparison. 
@@ -17720,27 +17750,16 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, return PTR_ERR(other_branch); other_branch_regs = other_branch->frame[other_branch->curframe]->regs; - if (BPF_SRC(insn->code) == BPF_X) { - err = reg_set_min_max(env, - &other_branch_regs[insn->dst_reg], - &other_branch_regs[insn->src_reg], - dst_reg, src_reg, opcode, is_jmp32); - } else /* BPF_SRC(insn->code) == BPF_K */ { - /* reg_set_min_max() can mangle the fake_reg. Make a copy - * so that these are two different memory locations. The - * src_reg is not used beyond here in context of K. - */ - memcpy(&env->fake_reg[1], &env->fake_reg[0], - sizeof(env->fake_reg[0])); - err = reg_set_min_max(env, - &other_branch_regs[insn->dst_reg], - &env->fake_reg[0], - dst_reg, &env->fake_reg[1], - opcode, is_jmp32); - } + err = regs_bounds_sanity_check_branches(env); if (err) return err; + copy_register_state(dst_reg, &env->false_reg1); + copy_register_state(src_reg, &env->false_reg2); + copy_register_state(&other_branch_regs[insn->dst_reg], &env->true_reg1); + if (BPF_SRC(insn->code) == BPF_X) + copy_register_state(&other_branch_regs[insn->src_reg], &env->true_reg2); + if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id && !WARN_ON_ONCE(src_reg->id != other_branch_regs[insn->src_reg].id)) { diff --git a/tools/testing/selftests/bpf/progs/verifier_bounds.c b/tools/testing/selftests/bpf/progs/verifier_bounds.c index bb20f0f06f05..c1ae013dee29 100644 --- a/tools/testing/selftests/bpf/progs/verifier_bounds.c +++ b/tools/testing/selftests/bpf/progs/verifier_bounds.c @@ -1066,7 +1066,6 @@ l0_%=: r0 = 0; \ SEC("xdp") __description("bound check with JMP_JSLT for crossing 64-bit signed boundary") __success __retval(0) -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void crossing_64_bit_signed_boundary_2(void) { asm volatile (" \ @@ -1148,7 +1147,6 @@ l0_%=: r0 = 0; \ SEC("xdp") __description("bound check with JMP32_JSLT for crossing 32-bit signed boundary") __success 
__retval(0) -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void crossing_32_bit_signed_boundary_2(void) { asm volatile (" \ @@ -1536,7 +1534,7 @@ __naked void sub32_partial_overflow(void) SEC("socket") __description("dead branch on jset, does not result in invariants violation error") __success __log_level(2) -__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__retval(0) __naked void jset_range_analysis(void) { asm volatile (" \ @@ -1572,7 +1570,7 @@ l0_%=: r0 = 0; \ */ SEC("socket") __description("bounds deduction cross sign boundary, negative overlap") -__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) +__success __log_level(2) __msg("7: (1f) r0 -= r6 {{.*}} R0=scalar(smin=smin32=-655,smax=smax32=-146,umin=0xfffffffffffffd71,umax=0xffffffffffffff6e,umin32=0xfffffd71,umax32=0xffffff6e,var_off=(0xfffffffffffffc00; 0x3ff))") __retval(0) __naked void bounds_deduct_negative_overlap(void) @@ -1616,7 +1614,7 @@ l0_%=: r0 = 0; \ */ SEC("socket") __description("bounds deduction cross sign boundary, positive overlap") -__success __log_level(2) __flag(BPF_F_TEST_REG_INVARIANTS) +__success __log_level(2) __msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=0,smax=umax=smax32=umax32=127,var_off=(0x0; 0x7f))") __retval(0) __naked void bounds_deduct_positive_overlap(void) @@ -1649,7 +1647,7 @@ l0_%=: r0 = 0; \ */ SEC("socket") __description("bounds deduction cross sign boundary, two overlaps") -__failure __flag(BPF_F_TEST_REG_INVARIANTS) +__failure __msg("3: (2d) if r0 > r1 {{.*}} R0=scalar(smin=smin32=-128,smax=smax32=127,umax=0xffffffffffffff80)") __msg("frame pointer is read only") __naked void bounds_deduct_two_overlaps(void) @@ -1713,7 +1711,7 @@ SEC("socket") __description("conditional jump on same register, branch taken") __not_msg("20: (b7) r0 = 1 {{.*}} R0=1") __success __log_level(2) -__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__retval(0) __naked void condition_jump_on_same_register(void *ctx) { asm volatile(" \ @@ -1748,7 +1746,7 @@ SEC("socket") 
__description("jset on same register, constant value branch taken") __not_msg("7: (b7) r0 = 1 {{.*}} R0=1") __success __log_level(2) -__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__retval(0) __naked void jset_on_same_register_1(void *ctx) { asm volatile(" \ @@ -1770,7 +1768,7 @@ SEC("socket") __description("jset on same register, scalar value branch taken") __not_msg("12: (b7) r0 = 1 {{.*}} R0=1") __success __log_level(2) -__retval(0) __flag(BPF_F_TEST_REG_INVARIANTS) +__retval(0) __naked void jset_on_same_register_2(void *ctx) { asm volatile(" \ @@ -1800,7 +1798,6 @@ __description("jset on same register, scalar value unknown branch 1") __msg("3: (b7) r0 = 0 {{.*}} R0=0") __msg("5: (b7) r0 = 1 {{.*}} R0=1") __success __log_level(2) -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void jset_on_same_register_3(void *ctx) { asm volatile(" \ @@ -1822,7 +1819,6 @@ __description("jset on same register, scalar value unknown branch 2") __msg("4: (b7) r0 = 0 {{.*}} R0=0") __msg("6: (b7) r0 = 1 {{.*}} R0=1") __success __log_level(2) -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void jset_on_same_register_4(void *ctx) { asm volatile(" \ @@ -1845,7 +1841,6 @@ __description("jset on same register, scalar value unknown branch 3") __msg("4: (b7) r0 = 0 {{.*}} R0=0") __msg("6: (b7) r0 = 1 {{.*}} R0=1") __success __log_level(2) -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void jset_on_same_register_5(void *ctx) { asm volatile(" \ @@ -1877,7 +1872,6 @@ SEC("socket") __description("bounds refinement with single-value tnum on umax") __msg("3: (15) if r0 == 0xe0 {{.*}} R0=240") __success __log_level(2) -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void bounds_refinement_tnum_umax(void *ctx) { asm volatile(" \ @@ -1907,7 +1901,6 @@ SEC("socket") __description("bounds refinement with single-value tnum on umin") __msg("3: (15) if r0 == 0xf0 {{.*}} R0=224") __success __log_level(2) -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void bounds_refinement_tnum_umin(void *ctx) { asm volatile(" \ @@ -2002,7 
+1995,6 @@ __naked void bounds_refinement_multiple_overlaps(void *ctx) SEC("socket") __success -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void signed_unsigned_intersection32_case1(void *ctx) { asm volatile(" \ @@ -2020,7 +2012,6 @@ __naked void signed_unsigned_intersection32_case1(void *ctx) SEC("socket") __success -__flag(BPF_F_TEST_REG_INVARIANTS) __naked void signed_unsigned_intersection32_case2(void *ctx) { asm volatile(" \ @@ -2165,4 +2156,32 @@ l0_%=: r0 = 0; \ : __clobber_all); } +/* + * Last jump can be detected as always taken because the intersection of R5 and + * R7 32bit tnums produces a constant that isn't within R7's s32 bounds. + */ +SEC("socket") +__description("dead branch: tnums give impossible constant if equal") +__success +__naked void tnums_equal_impossible_constant(void *ctx) +{ + asm volatile(" \ + call %[bpf_get_prandom_u32]; \ + r5 = r0; \ + /* Set r5's var_off32 to (0; 0xfffffffc) */ \ + r5 &= 0xfffffffffffffffc; \ + r7 = r0; \ + /* Set r7's var_off32 to (0x0; 0x1) */ \ + r7 &= 0x1; \ + /* Now, s32=[-43; -42], var_off32=(0xffffffd4; 0x3) */ \ + r7 += -43; \ + /* On fallthrough, var_off32=-44, not in s32 */ \ + if w5 != w7 goto +1; \ + r10 = 0; \ + exit; \ +" : + : __imm(bpf_get_prandom_u32) + : __clobber_all); +} + char _license[] SEC("license") = "GPL"; |
