author | Alexei Starovoitov <ast@kernel.org> | 2023-07-06 05:34:42 +0200 |
---|---|---|
committer | Daniel Borkmann <daniel@iogearbox.net> | 2023-07-12 23:45:23 +0200 |
commit | 04fabf00b4d3aff5d010ecb617001814e409e24a | |
tree | cf96390c0f04b284a514a4313441abd88eb8ee3e /kernel/bpf/memalloc.c | |
parent | bpf: Add a hint to allocated objects. | |
bpf: Allow reuse from waiting_for_gp_ttrace list.
alloc_bulk() can reuse elements from free_by_rcu_ttrace.
Let it reuse from waiting_for_gp_ttrace as well to avoid unnecessary kmalloc().
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20230706033447.54696-10-alexei.starovoitov@gmail.com
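To make the ordering described in the commit message concrete, here is a minimal userspace sketch of the refill logic. It is illustrative only: the list and field names mirror the kernel ones, but the types, the non-atomic list helpers, and `refill()` itself are simplified stand-ins for the real `alloc_bulk()` shown in the diff below, which uses the kernel's llist API and per-CPU caches.

```c
#include <stdio.h>
#include <stdlib.h>

struct node { struct node *next; };

struct cache {
	struct node *free_by_rcu_ttrace;     /* objects whose RCU grace period already elapsed */
	struct node *waiting_for_gp_ttrace;  /* objects still waiting for a grace period */
	struct node *free_llist;             /* list being refilled for future allocations */
	int free_cnt;
};

static struct node *pop(struct node **head)
{
	struct node *n = *head;

	if (n)
		*head = n->next;
	return n;
}

static void push(struct node **head, struct node *n)
{
	n->next = *head;
	*head = n;
}

/* Refill free_llist up to cnt objects, in the order the patch establishes:
 * 1) free_by_rcu_ttrace, 2) waiting_for_gp_ttrace, 3) fresh allocation.
 */
static void refill(struct cache *c, int cnt)
{
	struct node *obj;
	int i;

	for (i = 0; i < cnt; i++) {
		obj = pop(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		push(&c->free_llist, obj);
		c->free_cnt++;
	}
	for (; i < cnt; i++) {
		obj = pop(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		push(&c->free_llist, obj);
		c->free_cnt++;
	}
	for (; i < cnt; i++) {
		obj = malloc(sizeof(*obj));   /* only reached once both reuse lists ran dry */
		if (!obj)
			break;
		push(&c->free_llist, obj);
		c->free_cnt++;
	}
}

int main(void)
{
	struct cache c = { 0 };
	struct node a, b;

	push(&c.free_by_rcu_ttrace, &a);
	push(&c.waiting_for_gp_ttrace, &b);
	refill(&c, 4);                 /* reuses a and b, allocates the remaining two */
	printf("free_cnt=%d\n", c.free_cnt);
	return 0;
}
```

The point of the second loop is exactly the one the commit message makes: objects parked on waiting_for_gp_ttrace can be handed back to the cache directly instead of falling through to a fresh allocation.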
Diffstat (limited to 'kernel/bpf/memalloc.c')
-rw-r--r-- | kernel/bpf/memalloc.c | 16 |
1 file changed, 10 insertions, 6 deletions
diff --git a/kernel/bpf/memalloc.c b/kernel/bpf/memalloc.c
index 9986c6b7df4d..e5a87f6cf2cc 100644
--- a/kernel/bpf/memalloc.c
+++ b/kernel/bpf/memalloc.c
@@ -212,6 +212,15 @@ static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
 	if (i >= cnt)
 		return;
 
+	for (; i < cnt; i++) {
+		obj = llist_del_first(&c->waiting_for_gp_ttrace);
+		if (!obj)
+			break;
+		add_obj_to_free_list(c, obj);
+	}
+	if (i >= cnt)
+		return;
+
 	memcg = get_memcg(c);
 	old_memcg = set_active_memcg(memcg);
 	for (; i < cnt; i++) {
@@ -295,12 +304,7 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
 
 	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
 	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
-		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
-		 * It doesn't race with llist_del_all either.
-		 * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
-		 * from __free_rcu() and from drain_mem_cache().
-		 */
-		__llist_add(llnode, &c->waiting_for_gp_ttrace);
+		llist_add(llnode, &c->waiting_for_gp_ttrace);
 
 	if (unlikely(READ_ONCE(c->draining))) {
 		__free_rcu(&c->rcu_ttrace);
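The second hunk drops the comment that justified the non-atomic `__llist_add()` and switches to `llist_add()`. The likely reason, which the patch does not spell out verbatim, is that `alloc_bulk()` may now pop elements off waiting_for_gp_ttrace via `llist_del_first()` while `do_call_rcu_ttrace()` is pushing onto the same list, so the push side has to be the atomic cmpxchg variant. A rough userspace analog with C11 atomics (illustrative only, not the kernel's llist implementation):

```c
#include <stdatomic.h>
#include <stddef.h>

struct lnode { struct lnode *next; };
struct lhead { _Atomic(struct lnode *) first; };

/* __llist_add() analog: a plain read-modify-write of the list head.
 * Safe only when nothing else touches the list concurrently; racing
 * with a concurrent pop it can lose the removal or re-link a node
 * that was already taken off the list. */
void push_unsynced(struct lhead *h, struct lnode *n)
{
	n->next = atomic_load_explicit(&h->first, memory_order_relaxed);
	atomic_store_explicit(&h->first, n, memory_order_relaxed);
}

/* llist_add() analog: cmpxchg loop, safe against concurrent removal. */
void push_atomic(struct lhead *h, struct lnode *n)
{
	struct lnode *old = atomic_load(&h->first);

	do {
		n->next = old;
	} while (!atomic_compare_exchange_weak(&h->first, &old, n));
}
```

The exact concurrency rules for the kernel's lock-less lists are documented in include/linux/llist.h; the sketch above only illustrates the add-side difference that this hunk is about.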