author     Alexei Starovoitov <ast@kernel.org>      2022-09-02 23:10:55 +0200
committer  Daniel Borkmann <daniel@iogearbox.net>   2022-09-05 15:33:06 +0200
commit     dccb4a9013a68ddcb8303cd60f2fca1742014f3f (patch)
tree       9a5493637aa7952ff2999ffb2c03cadeae56c0ff /kernel/bpf
parent     bpf: Remove tracing program restriction on map types (diff)
bpf: Prepare bpf_mem_alloc to be used by sleepable bpf programs.
Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
Then use call_rcu() to wait for normal progs to finish,
and finally do free_one() on each element when freeing objects
into the global memory pool.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-14-alexei.starovoitov@gmail.com
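
The commit message above describes a two-stage grace-period chain: objects are first handed to call_rcu_tasks_trace(), and its callback in turn hands them to call_rcu(), so both sleepable (rcu_read_lock_trace) and non-sleepable (rcu_read_lock) BPF programs are guaranteed to be done before free_one() runs. Below is a minimal sketch of that pattern only; struct cache, queue_free() and the callback names are simplified stand-ins for the real bpf_mem_cache internals, not the kernel's actual code.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>

/* Hypothetical stand-in for struct bpf_mem_cache. */
struct cache {
        struct rcu_head rcu;
        /* ... objects waiting for both grace periods ... */
};

static void free_after_both_gps(struct rcu_head *head)
{
        struct cache *c = container_of(head, struct cache, rcu);

        /* Both tasks-trace RCU readers (sleepable progs) and regular RCU
         * readers (normal progs) are done; the cached elements could now
         * be freed into the global memory pool.
         */
}

static void after_tasks_trace_gp(struct rcu_head *head)
{
        struct cache *c = container_of(head, struct cache, rcu);

        /* Sleepable progs have finished; now wait for normal progs. */
        call_rcu(&c->rcu, free_after_both_gps);
}

static void queue_free(struct cache *c)
{
        /* Stage 1: wait for sleepable progs via RCU tasks trace. */
        call_rcu_tasks_trace(&c->rcu, after_tasks_trace_gp);
}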
Diffstat (limited to 'kernel/bpf')
kernel/bpf/memalloc.c | 15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index f7b07787581b..8895c016dcdb 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -230,6 +230,13 @@ static void __free_rcu(struct rcu_head *head)
         atomic_set(&c->call_rcu_in_progress, 0);
 }
 
+static void __free_rcu_tasks_trace(struct rcu_head *head)
+{
+        struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
+
+        call_rcu(&c->rcu, __free_rcu);
+}
+
 static void enque_to_free(struct bpf_mem_cache *c, void *obj)
 {
         struct llist_node *llnode = obj;
@@ -255,7 +262,11 @@ static void do_call_rcu(struct bpf_mem_cache *c)
          * from __free_rcu() and from drain_mem_cache().
          */
         __llist_add(llnode, &c->waiting_for_gp);
-        call_rcu(&c->rcu, __free_rcu);
+        /* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
+         * Then use call_rcu() to wait for normal progs to finish
+         * and finally do free_one() on each element.
+         */
+        call_rcu_tasks_trace(&c->rcu, __free_rcu_tasks_trace);
 }
 
 static void free_bulk(struct bpf_mem_cache *c)
@@ -457,6 +468,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
                 /* c->waiting_for_gp list was drained, but __free_rcu might
                  * still execute. Wait for it now before we free 'c'.
                  */
+                rcu_barrier_tasks_trace();
                 rcu_barrier();
                 free_percpu(ma->cache);
                 ma->cache = NULL;
@@ -471,6 +483,7 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
                 }
                 if (c->objcg)
                         obj_cgroup_put(c->objcg);
+                rcu_barrier_tasks_trace();
                 rcu_barrier();
                 free_percpu(ma->caches);
                 ma->caches = NULL;
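
The two rcu_barrier_tasks_trace() calls added to bpf_mem_alloc_destroy() mirror that chain on teardown: the tasks-trace barrier waits for every pending __free_rcu_tasks_trace() callback (each of which queues a regular RCU callback when it runs), and only the rcu_barrier() that follows can then guarantee __free_rcu() has also finished before the per-cpu memory is released. A rough sketch of the required ordering, reusing the hypothetical struct cache from the sketch above (destroy_caches() is a placeholder, not the real destroy path):

/* Sketch: teardown must flush both callback stages, in this order. */
static void destroy_caches(void)
{
        /* Flush callbacks queued with call_rcu_tasks_trace(); each one
         * chains a call_rcu() callback when it runs.
         */
        rcu_barrier_tasks_trace();

        /* Flush the call_rcu() callbacks that were just chained. */
        rcu_barrier();

        /* Only after both barriers is it safe to release the memory,
         * e.g. via free_percpu() in the real bpf_mem_alloc_destroy().
         */
}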