author:    Joerg Roedel <jroedel@suse.de>	2018-04-17 15:27:16 +0200
committer: Thomas Gleixner <tglx@linutronix.de>	2018-04-17 15:43:01 +0200
commit:    d6ef1f194b7569af8b8397876dc9ab07649d63cb
tree:      9d7bd7b181f1d7f32abb559812163b8eba076d8b /arch/x86/mm/dump_pagetables.c
parent:    x86,sched: Allow topologies where NUMA nodes share an LLC
x86/mm: Prevent kernel Oops in PTDUMP code with HIGHPTE=y
The walk_pte_level() function just uses __va to get the virtual address of
the PTE page, but that breaks when the PTE page is not in the direct
mapping with HIGHPTE=y.

The result is an unhandled kernel paging request at some random address
when accessing the current_kernel or current_user file.

Use the correct API to access PTE pages.

Fixes: fe770bf0310d ('x86: clean up the page table dumper and add 32-bit support')
Signed-off-by: Joerg Roedel <jroedel@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
Cc: jgross@suse.com
Cc: JBeulich@suse.com
Cc: hpa@zytor.com
Cc: aryabinin@virtuozzo.com
Cc: kirill.shutemov@linux.intel.com
Link: https://lkml.kernel.org/r/1523971636-4137-1-git-send-email-joro@8bytes.org
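For illustration, here is a minimal sketch of the access pattern the patch switches to (this is not the kernel's code; the function name walk_ptes_sketch and the use of PAGE_SIZE as the per-entry address stride are assumptions for this example). With CONFIG_HIGHPTE=y the PTE page may live in highmem, so an address derived from the direct mapping via __va()/pmd_page_vaddr() is not valid; pte_offset_map() instead creates a temporary kernel mapping for the page, which pte_unmap() releases again:

/*
 * Sketch only: visit every PTE of one PTE page in a HIGHPTE-safe way.
 * pte_offset_map() maps the PTE page (via kmap_atomic() when
 * CONFIG_HIGHPTE is enabled), so the access works even when the page
 * is not covered by the direct mapping; pte_unmap() undoes it.
 */
#include <linux/highmem.h>
#include <asm/pgtable.h>

static void walk_ptes_sketch(pmd_t pmd, unsigned long base)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		unsigned long addr = base + i * PAGE_SIZE;
		pte_t *pte = pte_offset_map(&pmd, addr);  /* temporary mapping */
		pgprotval_t prot = pte_flags(*pte);

		(void)prot;	/* a real walker would pass this to note_page() */
		pte_unmap(pte);	/* drop the temporary mapping */
	}
}

The hunk below applies the same pattern inside walk_pte_level().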
Diffstat (limited to 'arch/x86/mm/dump_pagetables.c')
-rw-r--r--	arch/x86/mm/dump_pagetables.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 62a7e9f65dec..cc7ff5957194 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
+#include <linux/highmem.h>
 
 #include <asm/pgtable.h>
 
@@ -334,16 +335,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
 			   pgprotval_t eff_in, unsigned long P)
 {
 	int i;
-	pte_t *start;
+	pte_t *pte;
 	pgprotval_t prot, eff;
 
-	start = (pte_t *)pmd_page_vaddr(addr);
 	for (i = 0; i < PTRS_PER_PTE; i++) {
-		prot = pte_flags(*start);
-		eff = effective_prot(eff_in, prot);
 		st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
+		pte = pte_offset_map(&addr, st->current_address);
+		prot = pte_flags(*pte);
+		eff = effective_prot(eff_in, prot);
 		note_page(m, st, __pgprot(prot), eff, 5);
-		start++;
+		pte_unmap(pte);
 	}
 }
 #ifdef CONFIG_KASAN
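For context on why this fixes the HIGHPTE=y case: on 32-bit x86 at the time, pte_offset_map()/pte_unmap() were defined in arch/x86/include/asm/pgtable_32.h roughly as below (a paraphrased sketch, not a verbatim quote of the header; details may differ). With CONFIG_HIGHPTE the PTE page is reached through a kmap_atomic() mapping; without it, through the direct mapping, and pte_unmap() is a no-op:

/* Approximate shape of the x86-32 helpers (paraphrased, not verbatim): */
#ifdef CONFIG_HIGHPTE
#define pte_offset_map(dir, address)					\
	((pte_t *)kmap_atomic(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte)		kunmap_atomic((pte))
#else
#define pte_offset_map(dir, address)					\
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_unmap(pte)		do { } while (0)
#endif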