author		Xu Kuohai <xukuohai@huawei.com>	2022-11-10 13:21:28 +0100
committer	Andrii Nakryiko <andrii@kernel.org>	2022-11-11 21:05:14 +0100
commit		4b45cd81f737d79d0fbfc0d320a1e518e7f0bbf0 (patch)
tree		29b8acb33922e5d8489342c09dfecc6cb0fe539f /kernel/bpf/percpu_freelist.c
parent		Merge branch 'Fix offset when fault occurs in strncpy_from_kernel_nofault()' (diff)
bpf: Initialize same number of free nodes for each pcpu_freelist
pcpu_freelist_populate() initializes nr_elems / num_possible_cpus() + 1 free nodes for some CPUs, then possibly one CPU with fewer nodes, and the remaining CPUs with 0 nodes. For example, when nr_elems == 256 and num_possible_cpus() == 32, CPUs 0~27 each get 9 free nodes, CPU 28 gets 4 free nodes, and CPUs 29~31 get 0 free nodes, while in fact each CPU should get 8 nodes equally.

This patch first initializes nr_elems / num_possible_cpus() free nodes for each CPU, then distributes the remaining free nodes one per CPU until none are left.

Fixes: e19494edab82 ("bpf: introduce percpu_freelist")
Signed-off-by: Xu Kuohai <xukuohai@huawei.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20221110122128.105214-1-xukuohai@huawei.com
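For illustration, the following standalone userspace sketch (not part of the patch; the hard-coded NR_ELEMS and NCPUS values and the counter arrays are assumptions chosen to match the example above) simulates both distribution schemes and prints the per-CPU node counts:

/*
 * Userspace sketch only -- not kernel code. It models the old and new
 * distribution schemes for nr_elems == 256 and 32 possible CPUs, the
 * example used in the commit message.
 */
#include <stdio.h>

#define NCPUS    32
#define NR_ELEMS 256u

int main(void)
{
	unsigned int old_cnt[NCPUS] = { 0 }, new_cnt[NCPUS] = { 0 };
	unsigned int pcpu_entries, left, cpu, i, n, m;

	/* Old scheme: up to pcpu_entries nodes per CPU until elems run out. */
	pcpu_entries = NR_ELEMS / NCPUS + 1;		/* 9 */
	left = NR_ELEMS;
	for (cpu = 0; cpu < NCPUS && left; cpu++)
		for (i = 0; i < pcpu_entries && left; i++, left--)
			old_cnt[cpu]++;

	/* New scheme: n nodes per CPU, the first m CPUs get one extra. */
	n = NR_ELEMS / NCPUS;				/* 8 */
	m = NR_ELEMS % NCPUS;				/* 0 */
	for (cpu = 0; cpu < NCPUS; cpu++)
		new_cnt[cpu] = n + (cpu < m ? 1 : 0);

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu %2u: old %u, new %u\n", cpu, old_cnt[cpu], new_cnt[cpu]);
	return 0;
}

With these inputs the old scheme prints 9 nodes for CPUs 0~27, 4 for CPU 28 and 0 for CPUs 29~31, while the new scheme prints 8 nodes for every CPU.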
Diffstat (limited to 'kernel/bpf/percpu_freelist.c')
-rw-r--r--	kernel/bpf/percpu_freelist.c	23
1 file changed, 11 insertions(+), 12 deletions(-)
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index b6e7f5c5b9ab..034cf87b54e9 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -100,22 +100,21 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 			    u32 nr_elems)
 {
 	struct pcpu_freelist_head *head;
-	int i, cpu, pcpu_entries;
+	unsigned int cpu, cpu_idx, i, j, n, m;
 
-	pcpu_entries = nr_elems / num_possible_cpus() + 1;
-	i = 0;
+	n = nr_elems / num_possible_cpus();
+	m = nr_elems % num_possible_cpus();
 
+	cpu_idx = 0;
 	for_each_possible_cpu(cpu) {
-again:
 		head = per_cpu_ptr(s->freelist, cpu);
-		/* No locking required as this is not visible yet. */
-		pcpu_freelist_push_node(head, buf);
-		i++;
-		buf += elem_size;
-		if (i == nr_elems)
-			break;
-		if (i % pcpu_entries)
-			goto again;
+		j = n + (cpu_idx < m ? 1 : 0);
+		for (i = 0; i < j; i++) {
+			/* No locking required as this is not visible yet. */
+			pcpu_freelist_push_node(head, buf);
+			buf += elem_size;
+		}
+		cpu_idx++;
 	}
 }