summaryrefslogtreecommitdiff
path: root/include/linux
diff options
context:
space:
mode:
authorTao Chen <chen.dylane@linux.dev>2025-09-25 20:50:28 +0300
committerAndrii Nakryiko <andrii@kernel.org>2025-09-26 02:12:14 +0300
commit17f0d1f6321caa95699b8f96baf12e654d7b8d60 (patch)
treed1f316df789f191f7979ee675b68ec96e9a266fa /include/linux
parent105eb5dc74109a9f53c2f26c9a918d9347a73595 (diff)
downloadlinux-17f0d1f6321caa95699b8f96baf12e654d7b8d60.tar.xz
bpf: Add lookup_and_delete_elem for BPF_MAP_STACK_TRACE
The stacktrace map can easily become full, which will lead to failure in obtaining the stack. In addition to increasing the size of the map, another solution is to delete the stack_id after looking it up from user space, so extend the existing bpf_map_lookup_and_delete_elem() functionality to stacktrace map types. Signed-off-by: Tao Chen <chen.dylane@linux.dev> Signed-off-by: Andrii Nakryiko <andrii@kernel.org> Link: https://lore.kernel.org/bpf/20250925175030.1615837-1-chen.dylane@linux.dev
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bpf.h2
1 file changed, 1 insertion, 1 deletion
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index ea2ed6771cc6..6338e54a9b1f 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2724,7 +2724,7 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
u64 flags);
-int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
+int bpf_stackmap_extract(struct bpf_map *map, void *key, void *value, bool delete);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);