author     Andrei Matei <andreimatei1@gmail.com>   2021-02-07 04:10:24 +0300
committer  Alexei Starovoitov <ast@kernel.org>     2021-02-10 21:44:19 +0300
commit     01f810ace9ed37255f27608a0864abebccf0aab3 (patch)
tree       29e82aae31d2889d160c73201aa6d3514ccd059a /include
parent     ee5cc0363ea0d587f62349ff3b3e2dfa751832e4 (diff)
download   linux-01f810ace9ed37255f27608a0864abebccf0aab3.tar.xz
bpf: Allow variable-offset stack access
Before this patch, variable-offset access to the stack was disallowed for regular instructions, but was allowed for "indirect" accesses (i.e. helpers). This patch removes the restriction, allowing reading and writing to the stack through stack pointers with variable offsets. This makes stack-allocated buffers more usable in programs, and brings stack pointers closer to other types of pointers.

The motivation is being able to use stack-allocated buffers for data manipulation. When the stack size limit is sufficient, allocating buffers on the stack is simpler than per-cpu arrays, or other alternatives.

In unprivileged programs, variable-offset reads and writes are disallowed (they were already disallowed for the indirect access case) because the speculative execution checking code doesn't support them. Additionally, when writing through a variable-offset stack pointer, if any pointers are in the accessible range, there is a possibility of later leaking pointers because the write cannot be tracked precisely.

Writes with variable offset mark the whole range as initialized, even though we don't know which stack slots are actually written. This is in order to not reject future reads to these slots. Note that this doesn't affect writes done through helpers; like before, helpers need the whole stack range to be initialized to begin with. All the stack slots in range are considered scalars after the write; variable-offset register spills are not tracked.

For reads, all the stack slots in the variable range need to be initialized (but see above about what writes do), otherwise the read is rejected. All registers spilled in stack slots that might be read are marked as having been read, however reads through such pointers don't do register filling; the target register will always be either a scalar or a constant zero.

Signed-off-by: Andrei Matei <andreimatei1@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20210207011027.676572-2-andreimatei1@gmail.com
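To illustrate the access pattern this patch legalizes, here is a minimal sketch of a BPF program (not part of the patch; the section name, identifiers, and the use of bpf_get_prandom_u32() are illustrative only) that performs a direct read from a stack buffer through a pointer whose offset is only known to be bounded at verification time:

/* Sketch only: a direct load from a stack buffer at a variable, bounded
 * offset. Before this patch the verifier rejected such direct loads and
 * stores; only helper ("indirect") accesses could use variable offsets.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("tracepoint/syscalls/sys_enter_write")
int var_off_stack_read(void *ctx)
{
	char buf[64] = {};			/* stack buffer, fully initialized */
	__u64 idx = bpf_get_prandom_u32();	/* unknown scalar to the verifier */

	idx &= 63;	/* bound the offset so buf + idx stays inside the buffer */

	/* Variable-offset stack read; per the commit message, the loaded
	 * value is treated as a scalar (no register filling through
	 * variable-offset pointers).
	 */
	return buf[idx];
}

char LICENSE[] SEC("license") = "GPL";

Note that, as described above, such variable-offset accesses remain disallowed for unprivileged programs.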
Diffstat (limited to 'include')
 include/linux/bpf.h          | 5 +++++
 include/linux/bpf_verifier.h | 3 ++-
 2 files changed, 7 insertions(+), 1 deletion(-)
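The diffstat is limited to include/, so the verifier-side consumer of the new flag is not shown here. As a hedged sketch (the function name below is hypothetical; the real wiring lives in kernel/bpf/verifier.c), the new capability check would presumably be copied into the verifier environment alongside the existing flags visible in this diff:

#include <linux/bpf.h>
#include <linux/bpf_verifier.h>

/* Hypothetical helper: mirrors how the existing capability flags are
 * propagated into struct bpf_verifier_env; allow_uninit_stack is the
 * field added by this patch.
 */
static void populate_env_caps(struct bpf_verifier_env *env)
{
	env->allow_ptr_leaks = bpf_allow_ptr_leaks();
	env->allow_uninit_stack = bpf_allow_uninit_stack();	/* new in this patch */
	env->allow_ptr_to_map_access = bpf_allow_ptr_to_map_access();
}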
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 321966fc35db..079162bbd387 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -1290,6 +1290,11 @@ static inline bool bpf_allow_ptr_leaks(void)
 	return perfmon_capable();
 }
 
+static inline bool bpf_allow_uninit_stack(void)
+{
+	return perfmon_capable();
+}
+
 static inline bool bpf_allow_ptr_to_map_access(void)
 {
 	return perfmon_capable();
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index dfe6f85d97dd..532c97836d0d 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -195,7 +195,7 @@ struct bpf_func_state {
 	 * 0 = main function, 1 = first callee.
 	 */
 	u32 frameno;
-	/* subprog number == index within subprog_stack_depth
+	/* subprog number == index within subprog_info
 	 * zero == main subprog
 	 */
 	u32 subprogno;
@@ -404,6 +404,7 @@ struct bpf_verifier_env {
 	u32 used_btf_cnt;		/* number of used BTF objects */
 	u32 id_gen;			/* used to generate unique reg IDs */
 	bool allow_ptr_leaks;
+	bool allow_uninit_stack;
 	bool allow_ptr_to_map_access;
 	bool bpf_capable;
 	bool bypass_spec_v1;