path: root/arch/powerpc
author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-15 18:53:43 +0200
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-06-26 08:55:36 +0200
commit    6c16a74d423f584ed80815ee7b944f5b578dd37a (patch)
tree      c8ef820f92601b01ed5b90e840ed5123e51dfa0f /arch/powerpc
parent    powerpc/440: Fix warning early debug code (diff)
powerpc/mm: Fix potential access to freed pages when using hugetlbfs
When using 64k page sizes, our PTE pages are split in two halves, the second half containing the "extension" used to keep track of individual 4k pages when not using HW 64k pages.

However, our page tables used for hugetlb have a slightly different format and don't carry that "second half".

Our code that batches PTEs to be invalidated unconditionally reads the "second half" (to put it into the batch), which means that when called to invalidate hugetlb PTEs, it will access unrelated memory. It breaks when CONFIG_DEBUG_PAGEALLOC is enabled.

This fixes it by only accessing the second half when the _PAGE_COMBO bit is set in the first half, which indicates that we are dealing with a "combo" page that represents 16 x 4k subpages. Anything else shouldn't have this bit set and thus doesn't require loading from the second half.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/pte-hash64-64k.h | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index e05d26fa372f..82b72207c51c 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,8 @@
* generic accessors and iterators here
*/
#define __real_pte(e,p) ((real_pte_t) { \
- (e), pte_val(*((p) + PTRS_PER_PTE)) })
+ (e), ((e) & _PAGE_COMBO) ? \
+ (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
(((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
#define __rpte_to_pte(r) ((r).pte)
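
For illustration only, the guarded read that the new __real_pte() performs can be sketched as a small stand-alone C program. The toy_* names, the TOY_PAGE_COMBO value, and the pointer offset below are assumptions made up for this sketch, not the real pte-hash64-64k.h definitions.

/*
 * Minimal user-space sketch of the guarded "second half" read.
 * All names and constants are illustrative stand-ins, not the real
 * kernel definitions.
 */
#include <stdio.h>

typedef unsigned long pte_t;
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;

#define TOY_PTRS_PER_PTE 8	/* distance to the "second half" of the PTE page */
#define TOY_PAGE_COMBO   0x1UL	/* stand-in for _PAGE_COMBO */

/*
 * Only dereference the second half when the PTE is a "combo" (16 x 4k)
 * entry; for anything else (e.g. a hugetlb PTE) that memory may not
 * belong to this page table at all.
 */
static real_pte_t toy_real_pte(pte_t e, const pte_t *p)
{
	real_pte_t r = { .pte = e, .hidx = 0 };

	if (e & TOY_PAGE_COMBO)
		r.hidx = *(p + TOY_PTRS_PER_PTE);
	return r;
}

int main(void)
{
	/* A lone PTE with no "second half" allocated behind it. */
	pte_t huge_pte = 0;	/* TOY_PAGE_COMBO clear */
	real_pte_t r = toy_real_pte(huge_pte, &huge_pte);

	printf("hidx = %lu (second half never read)\n", r.hidx);
	return 0;
}

With the combo bit clear, the second half is never touched, which mirrors why the patched macro avoids reading past a hugetlb PTE page.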