path: root/kernel/bpf/memalloc.c
author     Alexei Starovoitov <ast@kernel.org>     2022-09-03 00:10:55 +0300
committer  Daniel Borkmann <daniel@iogearbox.net>  2022-09-05 16:33:06 +0300
commit     dccb4a9013a68ddcb8303cd60f2fca1742014f3f (patch)
tree       9a5493637aa7952ff2999ffb2c03cadeae56c0ff /kernel/bpf/memalloc.c
parent     96da3f7d489d11b43e7c1af90d876b9a2492cca8 (diff)
download   linux-dccb4a9013a68ddcb8303cd60f2fca1742014f3f.tar.xz
bpf: Prepare bpf_mem_alloc to be used by sleepable bpf programs.
Use call_rcu_tasks_trace() to wait for sleepable progs to finish. Then use call_rcu() to wait for normal progs to finish and finally do free_one() on each element when freeing objects into global memory pool.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-14-alexei.starovoitov@gmail.com
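The commit message describes a two-stage grace-period chain: a batch of objects headed back to the global pool first waits out an RCU Tasks Trace grace period (covering sleepable progs), then a regular RCU grace period (covering non-sleepable progs), and only then is freed with free_one(). A minimal sketch of that chaining pattern follows, reusing the names from the diff below; the struct layout is abridged and the free loop is elided, so treat it as an illustration of the technique rather than the exact file contents.

#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/llist.h>

struct bpf_mem_cache {
	struct llist_head waiting_for_gp;	/* batch waiting for both grace periods */
	struct rcu_head rcu;
	atomic_t call_rcu_in_progress;
	/* ... remaining fields elided ... */
};

/* Stage 2: runs after a regular RCU grace period, i.e. once neither
 * sleepable nor non-sleepable progs can still see the batch.
 */
static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);

	/* ... free_one() on each element of c->waiting_for_gp ... */
	atomic_set(&c->call_rcu_in_progress, 0);
}

/* Stage 1: runs after an RCU Tasks Trace grace period, which is what
 * protects sleepable progs.  The same rcu_head is then reused to queue
 * stage 2 via regular call_rcu().
 */
static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);

	call_rcu(&c->rcu, __free_rcu);
}

Reusing the rcu_head is safe here because by the time __free_rcu_tasks_trace() is invoked it has already been dequeued from the tasks-trace callback list, so it can be posted again for the regular RCU stage.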
Diffstat (limited to 'kernel/bpf/memalloc.c')
-rw-r--r--  kernel/bpf/memalloc.c  15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index f7b07787581b..8895c016dcdb 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -230,6 +230,13 @@ static void __free_rcu(struct rcu_head *head)
 	atomic_set(&c->call_rcu_in_progress, 0);
 }
 
+static void __free_rcu_tasks_trace(struct rcu_head *head)
+{
+	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
+
+	call_rcu(&c->rcu, __free_rcu);
+}
+
 static void enque_to_free(struct bpf_mem_cache *c, void *obj)
 {
 	struct llist_node *llnode = obj;
@@ -255,7 +262,11 @@ static void do_call_rcu(struct bpf_mem_cache *c)
 		 * from __free_rcu() and from drain_mem_cache().
 		 */
 		__llist_add(llnode, &c->waiting_for_gp);
-	call_rcu(&c->rcu, __free_rcu);
+	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
+	 * Then use call_rcu() to wait for normal progs to finish
+	 * and finally do free_one() on each element.
+	 */
+	call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
 }
 
 static void free_bulk(struct bpf_mem_cache *c)
@@ -457,6 +468,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 		/* c->waiting_for_gp list was drained, but __free_rcu might
 		 * still execute. Wait for it now before we free 'c'.
 		 */
+		rcu_barrier_tasks_trace();
 		rcu_barrier();
 		free_percpu(ma->cache);
 		ma->cache = NULL;
@@ -471,6 +483,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
 		}
 		if (c->objcg)
 			obj_cgroup_put(c->objcg);
+		rcu_barrier_tasks_trace();
 		rcu_barrier();
 		free_percpu(ma->caches);
 		ma->caches = NULL;
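On the destroy side, the two barriers mirror the callback chain: rcu_barrier_tasks_trace() flushes any pending __free_rcu_tasks_trace() callbacks, each of which may have just queued __free_rcu() with call_rcu(), and rcu_barrier() then flushes those before the per-cpu memory is freed. The order matters, since the second barrier can only wait for the regular-RCU callbacks once the first barrier has forced stage 1 to run. A sketch of that pairing follows; wait_for_free_chain() is a hypothetical helper name used only for illustration, not part of the patch.

#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>

static void wait_for_free_chain(void)
{
	/* Flush stage 1: every queued __free_rcu_tasks_trace() has run,
	 * so each in-flight batch has advanced to call_rcu(__free_rcu).
	 */
	rcu_barrier_tasks_trace();
	/* Flush stage 2: every __free_rcu() queued by stage 1 has run,
	 * so no callback can still touch the per-cpu caches freed next.
	 */
	rcu_barrier();
}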