author     Christophe Leroy <christophe.leroy@c-s.fr>  2020-01-14 18:54:02 +0100
committer  Michael Ellerman <mpe@ellerman.id.au>       2020-01-27 12:37:41 +0100
commit     47febbeeec440eec213960e3d25c57a8312d5340 (patch)
tree       cee805de32b544e2dfaa9807607119f8bbca0908 /arch/powerpc/mm
parent     powerpc/kconfig: Move CONFIG_PPC32 into Kconfig.cputype (diff)
download   linux-47febbeeec440eec213960e3d25c57a8312d5340.tar.xz
           linux-47febbeeec440eec213960e3d25c57a8312d5340.zip
powerpc/32: Force KASAN_VMALLOC for modules
Unloading/reloading of modules fails without KASAN_VMALLOC but works
properly with it.

Force selection of KASAN_VMALLOC when MODULES are selected, and drop
module_alloc() which was dedicated to KASAN for modules.

Reported-by: <erhard_f@mailbox.org>
Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://bugzilla.kernel.org/show_bug.cgi?id=205283
Link: https://lore.kernel.org/r/f909da11aecb59ab7f32ba01fae6f356eaa4d7bc.1579024426.git.christophe.leroy@c-s.fr
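Note: the Kconfig side of this change is not part of the diffstat below, which is limited to arch/powerpc/mm; it presumably amounts to something along the lines of "select KASAN_VMALLOC if KASAN && MODULES" in arch/powerpc/Kconfig (an assumption, since that hunk is not shown here). With the powerpc-specific module_alloc() dropped, PPC32 falls back to the generic weak helper in kernel/module.c (assuming no other arch override at the time), and CONFIG_KASAN_VMALLOC supplies shadow memory for the module area through the vmalloc path instead of kasan_init_region(). A minimal sketch of that generic fallback as it looked in kernels of this era, approximate and for illustration only:

/*
 * Sketch of the generic module_alloc() fallback (kernel/module.c, v5.5-era,
 * approximate). Architectures that do not override it get ordinary
 * executable vmalloc memory; with CONFIG_KASAN_VMALLOC the shadow for this
 * region is populated by the vmalloc machinery rather than by an
 * arch-specific hook.
 */
#include <linux/vmalloc.h>
#include <linux/moduleloader.h>

void * __weak module_alloc(unsigned long size)
{
	return vmalloc_exec(size);	/* later kernels switched to __vmalloc_node_range() */
}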
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/kasan/kasan_init_32.c | 31
1 file changed, 5 insertions(+), 26 deletions(-)
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 88036fb88350..b782d92622b4 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -12,7 +12,7 @@
 #include <asm/code-patching.h>
 #include <mm/mmu_decl.h>
 
-static pgprot_t kasan_prot_ro(void)
+static pgprot_t __init kasan_prot_ro(void)
 {
 	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		return PAGE_READONLY;
@@ -20,7 +20,7 @@ static pgprot_t kasan_prot_ro(void)
 	return PAGE_KERNEL_RO;
 }
 
-static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
+static void __init kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 {
 	unsigned long va = (unsigned long)kasan_early_shadow_page;
 	phys_addr_t pa = __pa(kasan_early_shadow_page);
@@ -30,7 +30,7 @@ static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
 }
 
-static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
+static int __init kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
 {
 	pmd_t *pmd;
 	unsigned long k_cur, k_next;
@@ -70,7 +70,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
 	return 0;
 }
 
-static void __ref *kasan_get_one_page(void)
+static void __init *kasan_get_one_page(void)
 {
 	if (slab_is_available())
 		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
@@ -78,7 +78,7 @@ static void __ref *kasan_get_one_page(void)
 	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 }
 
-static int __ref kasan_init_region(void *start, size_t size)
+static int __init kasan_init_region(void *start, size_t size)
 {
 	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
 	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
@@ -196,27 +196,6 @@ void __init kasan_late_init(void)
 	kasan_unmap_early_shadow_vmalloc();
 }
 
-#if defined(CONFIG_MODULES) && !defined(CONFIG_KASAN_VMALLOC)
-void *module_alloc(unsigned long size)
-{
-	void *base;
-
-	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
-				    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
-				    NUMA_NO_NODE, __builtin_return_address(0));
-
-	if (!base)
-		return NULL;
-
-	if (!kasan_init_region(base, size))
-		return base;
-
-	vfree(base);
-
-	return NULL;
-}
-#endif
-
 #ifdef CONFIG_PPC_BOOK3S_32
 u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
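A note on the annotation changes above: before this patch, kasan_init_region() and its helpers could be reached at module-load time through the powerpc module_alloc(), long after boot, so they could not live in .init.text; __ref kept modpost quiet about their references to init-time code such as memblock_alloc(). With module_alloc() removed, they only run during early boot and can become plain __init. A minimal, hypothetical illustration of the two annotations (the function names are invented for this sketch, not taken from the patch):

#include <linux/init.h>

/* __init: placed in .init.text and discarded once the kernel has booted. */
static int __init boot_time_only_helper(void)
{
	return 0;
}

/*
 * __ref: may reference init code or data without triggering a modpost
 * section-mismatch warning; used when the author knows the reference is
 * only exercised while init text is still present.
 */
static int __ref careful_caller(void)
{
	return boot_time_only_helper();
}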