author		Scott Wood <scottwood@freescale.com>	2015-04-11 02:37:34 +0200
committer	Scott Wood <scottwood@freescale.com>	2015-04-15 22:24:22 +0200
commit		50c6a665b383cb5839e45d04e36faeeefaffa052 (patch)
tree		300d70280629baa31706090fdb6806e5d3bce237
parent		Linux 4.0 (diff)
download	linux-50c6a665b383cb5839e45d04e36faeeefaffa052.tar.xz
		linux-50c6a665b383cb5839e45d04e36faeeefaffa052.zip
powerpc/hugetlb: Call mm_dec_nr_pmds() in hugetlb_free_pmd_range()
Commit dc6c9a35b66b5 ("mm: account pmd page tables to the process") added
a counter that is incremented whenever a PMD is allocated and decremented
whenever a PMD is freed.  For hugepages on PPC, common code is used to
allocate PMDs, but arch-specific code is used to free PMDs.

This results in kernel output such as "BUG: non-zero nr_pmds on freeing
mm: 1" when using hugepages.

Update the PPC hugepage PMD freeing code to decrement the count, just as
the above commit did for free_pmd_range().

Fixes: dc6c9a35b66b5 ("mm: account pmd page tables to the process")
Signed-off-by: Scott Wood <scottwood@freescale.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: stable@vger.kernel.org # 4.0.x
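For context, the accounting that the referenced commit introduced amounts to a per-mm counter with a pair of inc/dec helpers. The snippet below is a simplified paraphrase of those helpers as they appear around v4.0 (not quoted verbatim from include/linux/mm.h), shown only to make the required pairing explicit:

/*
 * Simplified paraphrase of the nr_pmds helpers added by commit
 * dc6c9a35b66b5 ("mm: account pmd page tables to the process").
 */
static inline void mm_inc_nr_pmds(struct mm_struct *mm)
{
	atomic_long_inc(&mm->nr_pmds);	/* bumped by common code when a PMD page is installed */
}

static inline void mm_dec_nr_pmds(struct mm_struct *mm)
{
	atomic_long_dec(&mm->nr_pmds);	/* every path that frees a PMD page must call this */
}

Generic code keeps these balanced in free_pmd_range(); the PPC hugepage path frees its PMDs in arch-specific code, so it has to issue the decrement itself, which is what the one-line change below adds.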
-rw-r--r--	arch/powerpc/mm/hugetlbpage.c	1
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e408bfc7948..cecbe00cee24 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -581,6 +581,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
 	pmd = pmd_offset(pud, start);
 	pud_clear(pud);
 	pmd_free_tlb(tlb, pmd, start);
+	mm_dec_nr_pmds(tlb->mm);
 }
 
 static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
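For reference, the increment that the new mm_dec_nr_pmds() call balances is issued by generic code when the PMD page is first installed. The following is a trimmed sketch of __pmd_alloc() in mm/memory.c around v4.0, with locking simplified and the 4-level-hack variant omitted, so it is illustrative rather than verbatim:

/*
 * Trimmed sketch of the allocation side (mm/memory.c, circa v4.0).
 * Locking is simplified for illustration.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	smp_wmb();	/* see the comment in __pte_alloc() */

	spin_lock(&mm->page_table_lock);
	if (!pud_present(*pud)) {
		mm_inc_nr_pmds(mm);		/* counted here by common code */
		pud_populate(mm, pud, new);
	} else {
		pmd_free(mm, new);		/* raced with another thread; nothing counted */
	}
	spin_unlock(&mm->page_table_lock);
	return 0;
}

Because hugetlb_free_pmd_range() tears the PMD down without going through free_pmd_range(), it must perform the matching mm_dec_nr_pmds(), which is exactly what the hunk above adds.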