author	Nitin Gupta <nitin.m.gupta@oracle.com>	2017-04-18 00:46:41 +0200
committer	David S. Miller <davem@davemloft.net>	2017-04-18 22:11:07 +0200
commit	544f8f935863c5a9ca3e34306ea3316095e7b7bf (patch)
tree	2447f4641b6a4cfec7600b168b95421343ff2de6 /arch
parent	sparc64: Use LOCKDEP_SMALL, not PROVE_LOCKING_SMALL (diff)
sparc64: Fix hugepage page table free
Make sure the start address is aligned to a PMD_SIZE boundary when freeing the page table backing a hugepage region. The issue was causing segfaults when a region backed by 64K pages was unmapped, since such a region is in general not PMD_SIZE aligned.

Signed-off-by: Nitin Gupta <nitin.m.gupta@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
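The core of the fix is rounding the start address down to a PMD boundary before walking the page tables. The following is a minimal sketch, not part of the patch, illustrating that rounding; the PMD_SHIFT value and the sample address are hypothetical and chosen only to make the arithmetic visible.

```c
/*
 * Hedged sketch of the PMD_MASK rounding the fix relies on.
 * PMD_SHIFT here is hypothetical (an 8 MB PMD span), not the
 * definitive sparc64 value.
 */
#include <stdio.h>

#define PMD_SHIFT	23UL
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	/* A region backed by 64K pages often starts mid-PMD. */
	unsigned long addr = 0x101610000UL;

	/* Round down to the PMD boundary before freeing page tables. */
	unsigned long aligned = addr & PMD_MASK;

	printf("addr=%#lx aligned=%#lx\n", addr, aligned);
	return 0;
}
```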
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc/mm/hugetlbpage.c	16
1 file changed, 16 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index ee5273ad918d..7c29d38e6b99 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -461,6 +461,22 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	pgd_t *pgd;
 	unsigned long next;
 
+	addr &= PMD_MASK;
+	if (addr < floor) {
+		addr += PMD_SIZE;
+		if (!addr)
+			return;
+	}
+	if (ceiling) {
+		ceiling &= PMD_MASK;
+		if (!ceiling)
+			return;
+	}
+	if (end - 1 > ceiling - 1)
+		end -= PMD_SIZE;
+	if (addr > end - 1)
+		return;
+
 	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
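The added floor/ceiling clamping follows the same pattern as the generic free_pgd_range() in mm/memory.c. One subtle point is the `end - 1 > ceiling - 1` comparison: a ceiling of 0 means "no ceiling", and the unsigned wraparound of `ceiling - 1` to ULONG_MAX ensures end is never trimmed in that case. Below is a hedged, self-contained illustration of that behavior; the trim_end() helper, the PMD_SIZE value, and the sample addresses are made up for demonstration only.

```c
/*
 * Hedged illustration (not part of the patch) of the ceiling trim.
 * When ceiling == 0 ("no ceiling"), ceiling - 1 wraps to ULONG_MAX,
 * so the comparison is false and end is left untouched.
 */
#include <stdio.h>

#define PMD_SIZE	(1UL << 23)	/* hypothetical 8 MB PMD span */

static unsigned long trim_end(unsigned long end, unsigned long ceiling)
{
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	return end;
}

int main(void)
{
	/* A real ceiling below end pulls end back by one PMD span. */
	printf("%#lx\n", trim_end(0x10800000UL, 0x10000000UL));
	/* ceiling == 0 means no ceiling, so end stays as it was. */
	printf("%#lx\n", trim_end(0x10800000UL, 0UL));
	return 0;
}
```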