author		Yonghong Song <yonghong.song@linux.dev>	2023-12-22 04:18:07 +0100
committer	Alexei Starovoitov <ast@kernel.org>	2024-01-04 06:08:26 +0100
commit		21f5a801c171dff4e728e38f62cf626c4197d07c (patch)
tree		876d4710deae0593e8aa7c6ad61b95295ddd3880 /tools/testing/selftests/bpf/progs/test_bpf_ma.c
parent		bpf: Limit up to 512 bytes for bpf_global_percpu_ma allocation (diff)
selftests/bpf: Cope with 512 bytes limit with bpf_global_percpu_ma
The previous patch limited the maximum data size for bpf_global_percpu_ma
to 512 bytes, which breaks the test_bpf_ma selftest. Adjust the test in
two ways:
- Since the maximum allowed data size for bpf_global_percpu_ma is now
  512 bytes, remove all tests beyond that limit, namely sizes 1024, 2048
  and 4096.
- Previously the percpu data size was bucket_size - 8 so that an
  allocation would not spill into the next bucket. Thanks to Patch 1,
  that adjustment is no longer necessary, so drop it.
Also, use a better way to generate BTF for the allocated types than
adding an unused member to the map value struct: declaring a global
pointer of the type is sufficient (see the sketch below).
Acked-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20231222031807.1292853-1-yonghong.song@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
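
The BTF-generation change mentioned above is subtle: with clang's BPF target, merely defining a struct does not guarantee that its BTF is emitted into the object, but referencing the type does. Below is a minimal standalone sketch of the old and new approaches, assuming libbpf's bpf_helpers.h for the __kptr annotation; the names (bin_data_16, map_value_old, map_value_new) are illustrative, not the selftest's exact definitions.

#include <bpf/bpf_helpers.h>

struct bin_data_16 {
	char data[16 - sizeof(void *)];
};

/* Old approach: keep an otherwise-unused member inside the map value so
 * that clang emits BTF for struct bin_data_16. This inflates every map
 * value by sizeof(struct bin_data_16).
 */
struct map_value_old {
	struct bin_data_16 __kptr * data;
	struct bin_data_16 not_used;
};

/* New approach (see commit 5d8d6634ccc): a never-dereferenced global
 * pointer to the type is enough to force BTF generation, at no size cost.
 */
struct bin_data_16 *__bin_data_16;

struct map_value_new {
	struct bin_data_16 __kptr * data;
};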
Diffstat (limited to 'tools/testing/selftests/bpf/progs/test_bpf_ma.c')
-rw-r--r--	tools/testing/selftests/bpf/progs/test_bpf_ma.c	66
1 file changed, 33 insertions(+), 33 deletions(-)
diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
index b78f4f702ae0..3494ca30fa7f 100644
--- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c
+++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c
@@ -20,6 +20,9 @@ char _license[] SEC("license") = "GPL";
 const unsigned int data_sizes[] = {16, 32, 64, 96, 128, 192, 256, 512, 1024, 2048, 4096};
 const volatile unsigned int data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
 
+const unsigned int percpu_data_sizes[] = {8, 16, 32, 64, 96, 128, 192, 256, 512};
+const volatile unsigned int percpu_data_btf_ids[ARRAY_SIZE(data_sizes)] = {};
+
 int err = 0;
 u32 pid = 0;
 
@@ -27,10 +30,10 @@ u32 pid = 0;
 	struct bin_data_##_size { \
 		char data[_size - sizeof(void *)]; \
 	}; \
+	/* See Commit 5d8d6634ccc, force btf generation for type bin_data_##_size */ \
+	struct bin_data_##_size *__bin_data_##_size; \
 	struct map_value_##_size { \
 		struct bin_data_##_size __kptr * data; \
-		/* To emit BTF info for bin_data_xx */ \
-		struct bin_data_##_size not_used; \
 	}; \
 	struct { \
 		__uint(type, BPF_MAP_TYPE_ARRAY); \
@@ -40,8 +43,12 @@ u32 pid = 0;
 	} array_##_size SEC(".maps")
 
 #define DEFINE_ARRAY_WITH_PERCPU_KPTR(_size) \
+	struct percpu_bin_data_##_size { \
+		char data[_size]; \
+	}; \
+	struct percpu_bin_data_##_size *__percpu_bin_data_##_size; \
 	struct map_value_percpu_##_size { \
-		struct bin_data_##_size __percpu_kptr * data; \
+		struct percpu_bin_data_##_size __percpu_kptr * data; \
 	}; \
 	struct { \
 		__uint(type, BPF_MAP_TYPE_ARRAY); \
@@ -114,7 +121,7 @@ static __always_inline void batch_percpu_alloc(struct bpf_map *map, unsigned int
 			return;
 		}
 		/* per-cpu allocator may not be able to refill in time */
-		new = bpf_percpu_obj_new_impl(data_btf_ids[idx], NULL);
+		new = bpf_percpu_obj_new_impl(percpu_data_btf_ids[idx], NULL);
 		if (!new)
 			continue;
 
@@ -179,7 +186,7 @@ DEFINE_ARRAY_WITH_KPTR(1024);
 DEFINE_ARRAY_WITH_KPTR(2048);
 DEFINE_ARRAY_WITH_KPTR(4096);
 
-/* per-cpu kptr doesn't support bin_data_8 which is a zero-sized array */
+DEFINE_ARRAY_WITH_PERCPU_KPTR(8);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(16);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(32);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(64);
@@ -188,9 +195,6 @@ DEFINE_ARRAY_WITH_PERCPU_KPTR(128);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(192);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(256);
 DEFINE_ARRAY_WITH_PERCPU_KPTR(512);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(1024);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(2048);
-DEFINE_ARRAY_WITH_PERCPU_KPTR(4096);
 
 SEC("?fentry/" SYS_PREFIX "sys_nanosleep")
 int test_batch_alloc_free(void *ctx)
@@ -246,20 +250,18 @@ int test_batch_percpu_alloc_free(void *ctx)
 	if ((u32)bpf_get_current_pid_tgid() != pid)
 		return 0;
 
-	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
-	 * then free 128 16-bytes per-cpu objects in batch to trigger freeing.
+	/* Alloc 128 8-bytes per-cpu objects in batch to trigger refilling,
+	 * then free 128 8-bytes per-cpu objects in batch to trigger freeing.
 	 */
-	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 0);
-	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 1);
-	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 2);
-	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 3);
-	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 4);
-	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 5);
-	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 6);
-	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 7);
-	CALL_BATCH_PERCPU_ALLOC_FREE(1024, 32, 8);
-	CALL_BATCH_PERCPU_ALLOC_FREE(2048, 16, 9);
-	CALL_BATCH_PERCPU_ALLOC_FREE(4096, 8, 10);
+	CALL_BATCH_PERCPU_ALLOC_FREE(8, 128, 0);
+	CALL_BATCH_PERCPU_ALLOC_FREE(16, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC_FREE(32, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC_FREE(64, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC_FREE(96, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC_FREE(128, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC_FREE(192, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC_FREE(256, 128, 7);
+	CALL_BATCH_PERCPU_ALLOC_FREE(512, 64, 8);
 
 	return 0;
 }
@@ -270,20 +272,18 @@ int test_percpu_free_through_map_free(void *ctx)
 	if ((u32)bpf_get_current_pid_tgid() != pid)
 		return 0;
 
-	/* Alloc 128 16-bytes per-cpu objects in batch to trigger refilling,
+	/* Alloc 128 8-bytes per-cpu objects in batch to trigger refilling,
 	 * then free these object through map free.
 	 */
-	CALL_BATCH_PERCPU_ALLOC(16, 128, 0);
-	CALL_BATCH_PERCPU_ALLOC(32, 128, 1);
-	CALL_BATCH_PERCPU_ALLOC(64, 128, 2);
-	CALL_BATCH_PERCPU_ALLOC(96, 128, 3);
-	CALL_BATCH_PERCPU_ALLOC(128, 128, 4);
-	CALL_BATCH_PERCPU_ALLOC(192, 128, 5);
-	CALL_BATCH_PERCPU_ALLOC(256, 128, 6);
-	CALL_BATCH_PERCPU_ALLOC(512, 64, 7);
-	CALL_BATCH_PERCPU_ALLOC(1024, 32, 8);
-	CALL_BATCH_PERCPU_ALLOC(2048, 16, 9);
-	CALL_BATCH_PERCPU_ALLOC(4096, 8, 10);
+	CALL_BATCH_PERCPU_ALLOC(8, 128, 0);
+	CALL_BATCH_PERCPU_ALLOC(16, 128, 1);
+	CALL_BATCH_PERCPU_ALLOC(32, 128, 2);
+	CALL_BATCH_PERCPU_ALLOC(64, 128, 3);
+	CALL_BATCH_PERCPU_ALLOC(96, 128, 4);
+	CALL_BATCH_PERCPU_ALLOC(128, 128, 5);
+	CALL_BATCH_PERCPU_ALLOC(192, 128, 6);
+	CALL_BATCH_PERCPU_ALLOC(256, 128, 7);
+	CALL_BATCH_PERCPU_ALLOC(512, 64, 8);
 
 	return 0;
 }
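
For context, the const volatile percpu_data_btf_ids array added above is left zeroed in the object file and is expected to be filled in by the userspace half of the test before the program is loaded, so that bpf_percpu_obj_new_impl() receives a valid BTF id. Below is a minimal sketch of how that lookup can be done with libbpf; the helper name fill_percpu_btf_ids and its parameters are illustrative, not the actual prog_tests harness.

#include <errno.h>
#include <stdio.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>

/* Resolve the BTF id of each struct percpu_bin_data_<size> in the
 * program's own BTF; the results are meant to be copied into the
 * skeleton's rodata (percpu_data_btf_ids) before load.
 */
static int fill_percpu_btf_ids(struct bpf_object *obj, unsigned int *ids,
			       const unsigned int *sizes, int n)
{
	struct btf *btf = bpf_object__btf(obj);
	char name[64];
	int i, id;

	if (!btf)
		return -EINVAL;
	for (i = 0; i < n; i++) {
		snprintf(name, sizeof(name), "percpu_bin_data_%u", sizes[i]);
		id = btf__find_by_name_kind(btf, name, BTF_KIND_STRUCT);
		if (id < 0)
			return id;
		ids[i] = id;
	}
	return 0;
}

Because the ids live in rodata, the verifier sees a constant argument to bpf_percpu_obj_new_impl() at load time and can type-check each allocation site.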