author    Kohei Enju <enjuk@amazon.com>        2025-12-08 16:14:31 +0300
committer Alexei Starovoitov <ast@kernel.org>  2025-12-10 10:53:27 +0300
commit    48e11bad9a1fd803a22d25c9a7d3aced1ba87817 (patch)
tree      7c2449734c47bc383f6e5727f57b7a47bda885bd
parent    5d9fb42f05e5bea386e6648d5699c2beaabe1c6a (diff)
download  linux-48e11bad9a1fd803a22d25c9a7d3aced1ba87817.tar.xz
bpf: cpumap: propagate underlying error in cpu_map_update_elem()
After commit 9216477449f3 ("bpf: cpumap: Add the possibility to attach
an eBPF program to cpumap"), __cpu_map_entry_alloc() may fail with
errors other than -ENOMEM, such as -EBADF or -EINVAL. However,
__cpu_map_entry_alloc() returns NULL on all failures, and
cpu_map_update_elem() unconditionally converts this NULL into -ENOMEM.
As a result, user space always receives -ENOMEM regardless of the
actual underlying error.

Examples of unexpected behavior:
- Nonexistent fd:  -ENOMEM (should be -EBADF)
- Non-BPF fd:      -ENOMEM (should be -EINVAL)
- Bad attach type: -ENOMEM (should be -EINVAL)

Change __cpu_map_entry_alloc() to return ERR_PTR(err) instead of NULL
and have cpu_map_update_elem() propagate this error.

Signed-off-by: Kohei Enju <enjuk@amazon.com>
Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
Link: https://lore.kernel.org/r/20251208131449.73036-2-enjuk@amazon.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
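For readers unfamiliar with the idiom the patch adopts, below is a
minimal standalone sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() pattern.
The helpers are simplified re-implementations of the <linux/err.h>
macros so the example builds in user space, and alloc_entry() is a
hypothetical stand-in for __cpu_map_entry_alloc(), not kernel code.

/*
 * Sketch of the ERR_PTR error-propagation idiom: encode a negative
 * errno in the returned pointer instead of collapsing every failure
 * to NULL. Simplified userspace re-implementation for illustration.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct entry { int qsize; };

/* Fails with a specific errno instead of returning bare NULL. */
static struct entry *alloc_entry(int fd)
{
	struct entry *e;

	if (fd < 0)
		return ERR_PTR(-EBADF);	/* bad fd: propagate -EBADF */

	e = malloc(sizeof(*e));
	if (!e)
		return ERR_PTR(-ENOMEM);

	e->qsize = 64;
	return e;
}

int main(void)
{
	struct entry *e = alloc_entry(-1);

	if (IS_ERR(e)) {
		/* The caller now sees -EBADF, not a blanket -ENOMEM. */
		printf("alloc failed: %ld\n", PTR_ERR(e));
		return 1;
	}

	free(e);
	return 0;
}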
-rw-r--r--  kernel/bpf/cpumap.c | 21
1 file changed, 13 insertions(+), 8 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index 703e5df1f4ef..04171fbc39cb 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -430,7 +430,7 @@ static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
u32 cpu)
{
- int numa, err, i, fd = value->bpf_prog.fd;
+ int numa, err = -ENOMEM, i, fd = value->bpf_prog.fd;
gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
struct bpf_cpu_map_entry *rcpu;
struct xdp_bulk_queue *bq;
@@ -440,7 +440,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
if (!rcpu)
- return NULL;
+ return ERR_PTR(err);
/* Alloc percpu bulkq */
rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
@@ -468,16 +468,21 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
rcpu->value.qsize = value->qsize;
gro_init(&rcpu->gro);
- if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
- goto free_ptr_ring;
+ if (fd > 0) {
+ err = __cpu_map_load_bpf_program(rcpu, map, fd);
+ if (err)
+ goto free_ptr_ring;
+ }
/* Setup kthread */
init_completion(&rcpu->kthread_running);
rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
"cpumap/%d/map:%d", cpu,
map->id);
- if (IS_ERR(rcpu->kthread))
+ if (IS_ERR(rcpu->kthread)) {
+ err = PTR_ERR(rcpu->kthread);
goto free_prog;
+ }
/* Make sure kthread runs on a single CPU */
kthread_bind(rcpu->kthread, cpu);
@@ -503,7 +508,7 @@ free_bulkq:
free_percpu(rcpu->bulkq);
free_rcu:
kfree(rcpu);
- return NULL;
+ return ERR_PTR(err);
}
static void __cpu_map_entry_free(struct work_struct *work)
@@ -596,8 +601,8 @@ static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
} else {
/* Updating qsize cause re-allocation of bpf_cpu_map_entry */
rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
- if (!rcpu)
- return -ENOMEM;
+ if (IS_ERR(rcpu))
+ return PTR_ERR(rcpu);
}
rcu_read_lock();
__cpu_map_entry_replace(cmap, key_cpu, rcpu);
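An illustrative user-space check of the change described in the commit
message (a sketch, not part of the patch): updating a cpumap entry
whose bpf_prog.fd does not refer to any open file should now fail with
EBADF rather than ENOMEM. This assumes libbpf >= 0.7 for
bpf_map_create() and sufficient privilege (CAP_BPF or root); the map
name and qsize value are arbitrary choices for the demo.

/*
 * Create a one-slot cpumap and try to attach a deliberately
 * nonexistent program fd to CPU 0, then print the errno.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <bpf/bpf.h>
#include <linux/bpf.h>

int main(void)
{
	struct bpf_cpumap_val val = {
		.qsize = 192,
		.bpf_prog.fd = 9999,	/* deliberately nonexistent fd */
	};
	__u32 key = 0;			/* CPU 0 */
	int map_fd;

	map_fd = bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpumap_demo",
				sizeof(__u32), sizeof(val), 1, NULL);
	if (map_fd < 0) {
		fprintf(stderr, "map create: %s\n", strerror(errno));
		return 1;
	}

	if (bpf_map_update_elem(map_fd, &key, &val, 0) < 0)
		/* Before this patch: ENOMEM; with it: EBADF. */
		fprintf(stderr, "update: %s\n", strerror(errno));

	return 0;
}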