author     Ard Biesheuvel <ard.biesheuvel@linaro.org>    2018-11-23 23:18:03 +0100
committer  Daniel Borkmann <daniel@iogearbox.net>        2018-12-05 16:36:28 +0100
commit     dc002bb62f10c5905420f8b8a7d5ec0da567fc82 (patch)
tree       2d5b812a9cb024a671bcda37431505d6dded1c47
parent     Merge branch 'prog_test_run-improvement' (diff)
bpf: add __weak hook for allocating executable memory
By default, BPF uses module_alloc() to allocate executable memory, but this is
not necessary on all arches and potentially undesirable on some of them.

So break out the module_alloc() and module_memfree() calls into __weak
functions to allow them to be overridden in arch code.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
-rw-r--r--   kernel/bpf/core.c   14
1 file changed, 12 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f93ed667546f..86817ab204e8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -623,6 +623,16 @@ static void bpf_jit_uncharge_modmem(u32 pages)
 	atomic_long_sub(pages, &bpf_jit_current);
 }
 
+void *__weak bpf_jit_alloc_exec(unsigned long size)
+{
+	return module_alloc(size);
+}
+
+void __weak bpf_jit_free_exec(void *addr)
+{
+	module_memfree(addr);
+}
+
 struct bpf_binary_header *
 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 		     unsigned int alignment,
@@ -640,7 +650,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
 	if (bpf_jit_charge_modmem(pages))
 		return NULL;
 
-	hdr = module_alloc(size);
+	hdr = bpf_jit_alloc_exec(size);
 	if (!hdr) {
 		bpf_jit_uncharge_modmem(pages);
 		return NULL;
@@ -664,7 +674,7 @@ void bpf_jit_binary_free(struct bpf_binary_header *hdr)
 {
 	u32 pages = hdr->pages;
 
-	module_memfree(hdr);
+	bpf_jit_free_exec(hdr);
 	bpf_jit_uncharge_modmem(pages);
 }
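
With the hooks in place, an architecture can provide its own strong definitions of
bpf_jit_alloc_exec()/bpf_jit_free_exec(), and the linker will prefer them over the
__weak defaults in kernel/bpf/core.c. Below is a minimal sketch of what such an
override could look like, assuming a hypothetical arch that reserves a dedicated
virtual address range for JITed BPF images; BPF_JIT_REGION_START/BPF_JIT_REGION_END
are assumed arch-defined constants that are not part of this patch, and any real
arch override may look different.

/* arch/<arch>/net/bpf_jit_comp.c -- illustrative sketch, not part of this patch */
#include <linux/filter.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Strong definition: overrides the __weak module_alloc() default above. */
void *bpf_jit_alloc_exec(unsigned long size)
{
	/*
	 * BPF_JIT_REGION_START/END are assumed arch-defined bounds of a
	 * dedicated JIT area, keeping BPF images out of module_alloc() space.
	 */
	return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
				    BPF_JIT_REGION_END, GFP_KERNEL,
				    PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
				    __builtin_return_address(0));
}

/* Strong definition: overrides the __weak module_memfree() default above. */
void bpf_jit_free_exec(void *addr)
{
	vfree(addr);
}

Architectures that define nothing keep the module_alloc()/module_memfree()
behaviour, so this patch is a no-op unless an override is provided.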