author      Mykyta Yatsenko <yatsenko@meta.com>    2026-03-31 01:27:56 +0300
committer   Alexei Starovoitov <ast@kernel.org>    2026-04-02 19:31:42 +0300
commit      90f51ebff242e9446a9bbd3ce1a04d50c30e8b96 (patch)
tree        f8c13bdd197761d7e07e101ab4eec8828d0de234
parent      f7601044020ac1c55675ee59422ef75b25a31bed (diff)
download    linux-90f51ebff242e9446a9bbd3ce1a04d50c30e8b96.tar.xz
bpf: Migrate bpf_task_work to kmalloc_nolock
Replace bpf_mem_alloc/bpf_mem_free with kmalloc_nolock/kfree_rcu for
bpf_task_work_ctx.

Replace guard(rcu_tasks_trace)() with guard(rcu)() in bpf_task_work_irq().
The function only accesses ctx struct members (not map values), so tasks
trace protection is not needed; regular RCU is sufficient since ctx is
freed via kfree_rcu. The guard in bpf_task_work_callback() remains tasks
trace since it accesses map values from process context.

Sleepable BPF programs hold rcu_read_lock_trace but not regular
rcu_read_lock. Since kfree_rcu waits for a regular RCU grace period, the
ctx memory can be freed while a sleepable program is still running. Add
scoped_guard(rcu) around the pointer read and refcount tryget in
bpf_task_work_acquire_ctx to close this race window.

Since kfree_rcu uses call_rcu internally, which is not safe from NMI
context, defer destruction via irq_work when irqs are disabled. For the
lost-cmpxchg path the ctx was never published, so kfree_nolock is safe.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20260330-kmalloc_special-v2-1-c90403f92ff0@meta.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
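As a minimal standalone sketch of the pattern the third paragraph relies on
(not the patch itself; struct demo_ctx, demo_ptr, demo_acquire() and
demo_put() are illustrative names, not identifiers from kernel/bpf/helpers.c):
an object freed via kfree_rcu() may only be touched under regular
rcu_read_lock() between loading the pointer and taking a reference, which is
exactly what the new scoped_guard(rcu) provides for sleepable programs.

#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_ctx {
	refcount_t refcnt;
	struct rcu_head rcu;
};

static struct demo_ctx __rcu *demo_ptr;

/* Reader side: pin the object with regular RCU so a concurrent kfree_rcu()
 * cannot free it between the pointer load and refcount_inc_not_zero(). */
static struct demo_ctx *demo_acquire(void)
{
	struct demo_ctx *ctx;

	rcu_read_lock();
	ctx = rcu_dereference(demo_ptr);
	if (ctx && !refcount_inc_not_zero(&ctx->refcnt))
		ctx = NULL;		/* lost the race with the final put */
	rcu_read_unlock();

	return ctx;			/* caller now holds its own reference */
}

/* Last-put side: free only after a regular RCU grace period, which is what
 * makes the rcu_read_lock() in demo_acquire() a sufficient guard. */
static void demo_put(struct demo_ctx *ctx)
{
	if (refcount_dec_and_test(&ctx->refcnt))
		kfree_rcu(ctx, rcu);
}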
-rw-r--r--  kernel/bpf/helpers.c  |  56
1 file changed, 37 insertions(+), 19 deletions(-)
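Similarly, a sketch of the irqs-disabled deferral path that the diff below
adds to bpf_task_work_ctx_put(); again the names (demo_obj, demo_destroy,
demo_put) are illustrative, not taken from the patch. Since kfree_rcu() ends
up in call_rcu(), which is not safe from NMI context, the final put bounces
destruction to irq_work when irqs are disabled (which covers NMI).

#include <linux/irq_work.h>
#include <linux/irqflags.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
	refcount_t refcnt;
	struct irq_work irq_work;
	struct rcu_head rcu;
};

/* Runs from hard-irq context via irq_work, where kfree_rcu() is safe. */
static void demo_destroy(struct irq_work *irq_work)
{
	struct demo_obj *obj = container_of(irq_work, struct demo_obj, irq_work);

	kfree_rcu(obj, rcu);
}

static void demo_put(struct demo_obj *obj)
{
	if (!refcount_dec_and_test(&obj->refcnt))
		return;

	if (irqs_disabled()) {
		/* call_rcu() must not be invoked from NMI context, so defer
		 * destruction to irq_work whenever irqs are disabled. */
		obj->irq_work = IRQ_WORK_INIT(demo_destroy);
		irq_work_queue(&obj->irq_work);
	} else {
		demo_destroy(&obj->irq_work);
	}
}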
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 2d8538bf4cfa..48680dbd4a02 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -4295,17 +4295,25 @@ static bool bpf_task_work_ctx_tryget(struct bpf_task_work_ctx *ctx)
return refcount_inc_not_zero(&ctx->refcnt);
}
+static void bpf_task_work_destroy(struct irq_work *irq_work)
+{
+ struct bpf_task_work_ctx *ctx = container_of(irq_work, struct bpf_task_work_ctx, irq_work);
+
+ bpf_task_work_ctx_reset(ctx);
+ kfree_rcu(ctx, rcu);
+}
+
static void bpf_task_work_ctx_put(struct bpf_task_work_ctx *ctx)
{
if (!refcount_dec_and_test(&ctx->refcnt))
return;
- bpf_task_work_ctx_reset(ctx);
-
- /* bpf_mem_free expects migration to be disabled */
- migrate_disable();
- bpf_mem_free(&bpf_global_ma, ctx);
- migrate_enable();
+ if (irqs_disabled()) {
+ ctx->irq_work = IRQ_WORK_INIT(bpf_task_work_destroy);
+ irq_work_queue(&ctx->irq_work);
+ } else {
+ bpf_task_work_destroy(&ctx->irq_work);
+ }
}
static void bpf_task_work_cancel(struct bpf_task_work_ctx *ctx)
@@ -4359,7 +4367,7 @@ static void bpf_task_work_irq(struct irq_work *irq_work)
enum bpf_task_work_state state;
int err;
- guard(rcu_tasks_trace)();
+ guard(rcu)();
if (cmpxchg(&ctx->state, BPF_TW_PENDING, BPF_TW_SCHEDULING) != BPF_TW_PENDING) {
bpf_task_work_ctx_put(ctx);
@@ -4381,9 +4389,9 @@ static void bpf_task_work_irq(struct irq_work *irq_work)
/*
* It's technically possible for just scheduled task_work callback to
* complete running by now, going SCHEDULING -> RUNNING and then
- * dropping its ctx refcount. Instead of capturing extra ref just to
- * protected below ctx->state access, we rely on RCU protection to
- * perform below SCHEDULING -> SCHEDULED attempt.
+ * dropping its ctx refcount. Instead of capturing an extra ref just
+ * to protect below ctx->state access, we rely on rcu_read_lock
+ * above to prevent kfree_rcu from freeing ctx before we return.
*/
state = cmpxchg(&ctx->state, BPF_TW_SCHEDULING, BPF_TW_SCHEDULED);
if (state == BPF_TW_FREED)
@@ -4400,7 +4408,7 @@ static struct bpf_task_work_ctx *bpf_task_work_fetch_ctx(struct bpf_task_work *t
if (ctx)
return ctx;
- ctx = bpf_mem_alloc(&bpf_global_ma, sizeof(struct bpf_task_work_ctx));
+ ctx = bpf_map_kmalloc_nolock(map, sizeof(*ctx), 0, NUMA_NO_NODE);
if (!ctx)
return ERR_PTR(-ENOMEM);
@@ -4414,7 +4422,7 @@ static struct bpf_task_work_ctx *bpf_task_work_fetch_ctx(struct bpf_task_work *t
* tw->ctx is set by concurrent BPF program, release allocated
* memory and try to reuse already set context.
*/
- bpf_mem_free(&bpf_global_ma, ctx);
+ kfree_nolock(ctx);
return old_ctx;
}
@@ -4426,13 +4434,23 @@ static struct bpf_task_work_ctx *bpf_task_work_acquire_ctx(struct bpf_task_work
{
struct bpf_task_work_ctx *ctx;
- ctx = bpf_task_work_fetch_ctx(tw, map);
- if (IS_ERR(ctx))
- return ctx;
-
- /* try to get ref for task_work callback to hold */
- if (!bpf_task_work_ctx_tryget(ctx))
- return ERR_PTR(-EBUSY);
+ /*
+ * Sleepable BPF programs hold rcu_read_lock_trace but not
+ * regular rcu_read_lock. Since kfree_rcu waits for regular
+ * RCU GP, the ctx can be freed while we're between reading
+ * the pointer and incrementing the refcount. Take regular
+ * rcu_read_lock to prevent kfree_rcu from freeing the ctx
+ * before we can tryget it.
+ */
+ scoped_guard(rcu) {
+ ctx = bpf_task_work_fetch_ctx(tw, map);
+ if (IS_ERR(ctx))
+ return ctx;
+
+ /* try to get ref for task_work callback to hold */
+ if (!bpf_task_work_ctx_tryget(ctx))
+ return ERR_PTR(-EBUSY);
+ }
if (cmpxchg(&ctx->state, BPF_TW_STANDBY, BPF_TW_PENDING) != BPF_TW_STANDBY) {
/* lost acquiring race or map_release_uref() stole it from us, put ref and bail */