diff options
author | Eric Dumazet <edumazet@google.com> | 2017-12-12 23:22:39 +0100 |
---|---|---|
committer | Alexei Starovoitov <ast@kernel.org> | 2017-12-13 00:27:22 +0100 |
commit | 9147efcbe0b7cc96b18eb64b1a3f0d4bba81443c (patch) | |
tree | e96ef5a6e898fd45ca4d48f467f71fa505ff807e /kernel | |
parent | Merge branch 'bpf-misc-fixes' (diff) | |
download | linux-9147efcbe0b7cc96b18eb64b1a3f0d4bba81443c.tar.xz linux-9147efcbe0b7cc96b18eb64b1a3f0d4bba81443c.zip |
bpf: add schedule points to map alloc/free
While using large percpu maps, htab_map_alloc() can hold
the CPU for hundreds of ms.
This patch adds cond_resched() calls to percpu alloc/free
call sites, all running in process context.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/bpf/hashtab.c | 2 |
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index e469e05c8e83..3905d4bc5b80 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -114,6 +114,7 @@ static void htab_free_elems(struct bpf_htab *htab)
 		pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
 					 htab->map.key_size);
 		free_percpu(pptr);
+		cond_resched();
 	}
 free_elems:
 	bpf_map_area_free(htab->elems);
@@ -159,6 +160,7 @@ static int prealloc_init(struct bpf_htab *htab)
 			goto free_elems;
 		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
 				  pptr);
+		cond_resched();
 	}
 skip_percpu_elems: