author     Will Deacon <will.deacon@arm.com>	2017-01-12 16:04:29 +0100
committer  Will Deacon <will.deacon@arm.com>	2017-01-12 16:04:29 +0100
commit     42d1a731ffee44b16c06df371ffdbb14135c11e3
tree       8d78d86e34dfd7ec44377df098371f322c463152
parent     arm64: Documentation - Expose CPU feature registers
parent     drivers: firmware: psci: Use __pa_symbol for kernel symbol
Merge branch 'aarch64/for-next/debug-virtual' into aarch64/for-next/core
Merge core DEBUG_VIRTUAL changes from Laura Abbott. Later arm and arm64
support depends on these.
* aarch64/for-next/debug-virtual:
drivers: firmware: psci: Use __pa_symbol for kernel symbol
mm/usercopy: Switch to using lm_alias
mm/kasan: Switch to using __pa_symbol and lm_alias
kexec: Switch to __pa_symbol
mm: Introduce lm_alias
mm/cma: Cleanup highmem check
lib/Kconfig.debug: Add ARCH_HAS_DEBUG_VIRTUAL
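
The common thread in these patches is how a kernel-image symbol's address gets translated: __pa() is reserved for linear-map ("direct map") addresses, __pa_symbol() is the variant for symbols, and the new lm_alias() helper returns the linear-map alias of a symbol so generic code can keep passing direct-map pointers around. A minimal sketch of the distinction, assuming kernel context; the function below and its use of _text are illustrative and not taken from the patches:

#include <linux/init.h>
#include <linux/mm.h>		/* __pa_symbol(), lm_alias() after this series */

extern char _text[];		/* a kernel-image symbol */

/* Illustrative only: not part of the merged patches. */
static void lm_alias_example(void)
{
	/* Physical address of a symbol: use __pa_symbol(), not __pa(). */
	phys_addr_t pa = __pa_symbol(_text);

	/*
	 * Linear-map alias of the same symbol. On arm64 the kernel image
	 * and the linear map live in distinct VA ranges, so this is not
	 * the same pointer as _text; feeding _text to __pa() instead is
	 * exactly what the DEBUG_VIRTUAL checks added by the later arm64
	 * patches are meant to catch.
	 */
	void *alias = lm_alias(_text);

	(void)pa;
	(void)alias;
}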
-rw-r--r--  arch/x86/Kconfig          |  1
-rw-r--r--  drivers/firmware/psci.c   |  2
-rw-r--r--  include/linux/mm.h        |  4
-rw-r--r--  kernel/kexec_core.c       |  2
-rw-r--r--  lib/Kconfig.debug         |  5
-rw-r--r--  mm/cma.c                  | 15
-rw-r--r--  mm/kasan/kasan_init.c     | 15
-rw-r--r--  mm/usercopy.c             |  4
8 files changed, 26 insertions, 22 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e487493bbd47..f1d4e8f2131f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -46,6 +46,7 @@ config X86
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_DISCARD_MEMBLOCK
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FAST_MULTIPLIER
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 6c60a5087caf..66a8793f3b37 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -383,7 +383,7 @@ static int psci_suspend_finisher(unsigned long index)
 	u32 *state = __this_cpu_read(psci_power_state);
 
 	return psci_ops.cpu_suspend(state[index - 1],
-				    virt_to_phys(cpu_resume));
+				    __pa_symbol(cpu_resume));
 }
 
 int psci_cpu_suspend_enter(unsigned long index)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fe6b4036664a..5dc9c4650522 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -76,6 +76,10 @@ extern int mmap_rnd_compat_bits __read_mostly;
 #define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
 #endif
 
+#ifndef lm_alias
+#define lm_alias(x)	__va(__pa_symbol(x))
+#endif
+
 /*
  * To prevent common memory management code establishing
  * a zero page mapping on a read fault.
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 5617cc412444..a01974e1bf6b 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1399,7 +1399,7 @@ void __weak arch_crash_save_vmcoreinfo(void)
 
 phys_addr_t __weak paddr_vmcoreinfo_note(void)
 {
-	return __pa((unsigned long)(char *)&vmcoreinfo_note);
+	return __pa_symbol((unsigned long)(char *)&vmcoreinfo_note);
 }
 
 static int __init crash_save_vmcoreinfo_init(void)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b06848a104e6..2aed31608b86 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -622,9 +622,12 @@ config DEBUG_VM_PGFLAGS
 
 	  If unsure, say N.
 
+config ARCH_HAS_DEBUG_VIRTUAL
+	bool
+
 config DEBUG_VIRTUAL
 	bool "Debug VM translations"
-	depends on DEBUG_KERNEL && X86
+	depends on DEBUG_KERNEL && ARCH_HAS_DEBUG_VIRTUAL
 	help
 	  Enable some costly sanity checks in virtual to page code. This can
 	  catch mistakes with virt_to_page() and friends.
diff --git a/mm/cma.c b/mm/cma.c
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -235,18 +235,13 @@ int __init cma_declare_contiguous(phys_addr_t base,
 	phys_addr_t highmem_start;
 	int ret = 0;
 
-#ifdef CONFIG_X86
 	/*
-	 * high_memory isn't direct mapped memory so retrieving its physical
-	 * address isn't appropriate. But it would be useful to check the
-	 * physical address of the highmem boundary so it's justifiable to get
-	 * the physical address from it. On x86 there is a validation check for
-	 * this case, so the following workaround is needed to avoid it.
+	 * We can't use __pa(high_memory) directly, since high_memory
+	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
+	 * complain. Find the boundary by adding one to the last valid
+	 * address.
 	 */
-	highmem_start = __pa_nodebug(high_memory);
-#else
-	highmem_start = __pa(high_memory);
-#endif
+	highmem_start = __pa(high_memory - 1) + 1;
 
 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
 		__func__, &size, &base, &limit, &alignment);
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
index 3f9a41cf0ac6..31238dad85fb 100644
--- a/mm/kasan/kasan_init.c
+++ b/mm/kasan/kasan_init.c
@@ -15,6 +15,7 @@
 #include <linux/kasan.h>
 #include <linux/kernel.h>
 #include <linux/memblock.h>
+#include <linux/mm.h>
 #include <linux/pfn.h>
 
 #include <asm/page.h>
@@ -49,7 +50,7 @@ static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
 	pte_t *pte = pte_offset_kernel(pmd, addr);
 	pte_t zero_pte;
 
-	zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+	zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
 	zero_pte = pte_wrprotect(zero_pte);
 
 	while (addr + PAGE_SIZE <= end) {
@@ -69,7 +70,7 @@ static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
 		next = pmd_addr_end(addr, end);
 
 		if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
 
@@ -92,9 +93,9 @@ static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
 		if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
 			pmd_t *pmd;
 
-			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
 
@@ -135,11 +136,11 @@ void __init kasan_populate_zero_shadow(const void *shadow_start,
 			 * puds,pmds, so pgd_populate(), pud_populate()
 			 * is noops.
 			 */
-			pgd_populate(&init_mm, pgd, kasan_zero_pud);
+			pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_pud));
 			pud = pud_offset(pgd, addr);
-			pud_populate(&init_mm, pud, kasan_zero_pmd);
+			pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
 			pmd = pmd_offset(pud, addr);
-			pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+			pmd_populate_kernel(&init_mm, pmd, lm_alias(kasan_zero_pte));
 			continue;
 		}
 
diff --git a/mm/usercopy.c b/mm/usercopy.c
index 3c8da0af9695..8345299e3e3b 100644
--- a/mm/usercopy.c
+++ b/mm/usercopy.c
@@ -108,13 +108,13 @@ static inline const char *check_kernel_text_object(const void *ptr,
	 * __pa() is not just the reverse of __va(). This can be detected
	 * and checked:
	 */
-	textlow_linear = (unsigned long)__va(__pa(textlow));
+	textlow_linear = (unsigned long)lm_alias(textlow);
	/* No different mapping: we're done. */
	if (textlow_linear == textlow)
		return NULL;

	/* Check the secondary mapping... */
-	texthigh_linear = (unsigned long)__va(__pa(texthigh));
+	texthigh_linear = (unsigned long)lm_alias(texthigh);
	if (overlaps(ptr, n, textlow_linear, texthigh_linear))
		return "<linear kernel text>";
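
One detail in the mm/cma.c hunk is worth spelling out: high_memory points one byte past the end of the direct map, so it is itself not a valid __pa() argument once the DEBUG_VIRTUAL checks apply. Translating the last mapped byte and adding one recovers the same boundary. A worked example with invented addresses (purely illustrative, not from the patch):

/*
 * Illustrative numbers only: suppose the last direct-mapped byte has
 * physical address 0x87fffffff, so the highmem boundary is 0x880000000.
 *
 *   __pa(high_memory)           argument lies past the direct map;
 *                               DEBUG_VIRTUAL would (rightly) complain
 *   __pa(high_memory - 1)       == 0x87fffffff  (last mapped byte, valid)
 *   __pa(high_memory - 1) + 1   == 0x880000000  (the highmem boundary)
 */
highmem_start = __pa(high_memory - 1) + 1;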