author	Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>	2018-09-20 20:09:44 +0200
committer	Michael Ellerman <mpe@ellerman.id.au>	2018-10-03 07:39:59 +0200
commit	ae28f17b5eeb6702427ccb59e32e32a0c7e02f6b (patch)
tree	635b0696501a6c21fe6527138292c869de3dabf6
parent	powerpc/mm/hugetlb/book3s: add _PAGE_PRESENT to hugepd pointer. (diff)
powerpc/mm/book3s: Check for pmd_large instead of pmd_trans_huge
Update a few code paths to check for pmd_large().

set_pmd_at: We want to use this to store a swap pte at the pmd level. For swap ptes we don't want to set H_PAGE_THP_HUGE, hence check for pmd_large() in set_pmd_at(). This removes the false WARN_ON when using it with a swap pmd entry.

pmd_page: We don't really use pmd_page() on pmd migration entries, but it can also work with them, and we don't differentiate at the pte level. Hence update pmd_page() to work with pmd migration entries too.

__find_linux_pte: A lockless page table walk needs to handle pmd migration entries; the pmd_trans_huge() check returns false for them. We don't set thp = 1 for such entries, but we do update hpage_shift correctly. Without this, a pmd migration entry would be walked as a pte page pointer, which is wrong.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
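As the message notes, pmd_trans_huge() on book3s64 also requires H_PAGE_THP_HUGE, which a swap/migration pmd entry does not carry, while pmd_large() only asks whether the entry is a leaf. The following is a minimal, standalone toy model of that distinction. It is not kernel code: the bit positions, the TOY_* names, and the helper bodies are invented for illustration, and the real definitions in arch/powerpc/include/asm/book3s/64/pgtable.h differ in detail.

/*
 * Toy model (not kernel code): why a swap/migration pmd satisfies a
 * pmd_large()-style check but not a pmd_trans_huge()-style check.
 * Flag values and helper bodies are invented for the illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_PTE		(1ULL << 62)	/* "this entry is a leaf pte" */
#define TOY_PAGE_THP_HUGE	(1ULL << 61)	/* "this leaf is a mapped THP" */

typedef uint64_t toy_pmd_t;

/* Analogue of pmd_trans_huge(): needs both bits, so a swap pmd fails. */
static bool toy_pmd_trans_huge(toy_pmd_t pmd)
{
	return (pmd & (TOY_PAGE_PTE | TOY_PAGE_THP_HUGE)) ==
	       (TOY_PAGE_PTE | TOY_PAGE_THP_HUGE);
}

/* Analogue of pmd_large(): only asks "is this a leaf entry?". */
static bool toy_pmd_large(toy_pmd_t pmd)
{
	return pmd & TOY_PAGE_PTE;
}

int main(void)
{
	toy_pmd_t mapped_thp = TOY_PAGE_PTE | TOY_PAGE_THP_HUGE;
	toy_pmd_t migration_swap_pmd = TOY_PAGE_PTE;	/* no THP_HUGE bit */

	printf("mapped THP:    trans_huge=%d large=%d\n",
	       (int)toy_pmd_trans_huge(mapped_thp),
	       (int)toy_pmd_large(mapped_thp));
	printf("migration pmd: trans_huge=%d large=%d\n",
	       (int)toy_pmd_trans_huge(migration_swap_pmd),
	       (int)toy_pmd_large(migration_swap_pmd));
	/*
	 * A WARN_ON(!pmd_trans_huge(...)) in set_pmd_at() would fire for
	 * the migration entry, while WARN_ON(!pmd_large(...)) does not.
	 */
	return 0;
}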
 arch/powerpc/mm/hugetlbpage.c      | 8 ++++++--
 arch/powerpc/mm/pgtable-book3s64.c | 2 +-
 arch/powerpc/mm/pgtable_64.c       | 2 +-
 3 files changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index c6df73c66c40..9504641bd4d9 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -837,8 +837,12 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		ret_pte = (pte_t *) pmdp;
 		goto out;
 	}
-
-	if (pmd_huge(pmd)) {
+	/*
+	 * The pmd_large() check below handles the swap pmd pte
+	 * case. We need both checks because they are config
+	 * dependent.
+	 */
+	if (pmd_huge(pmd) || pmd_large(pmd)) {
 		ret_pte = (pte_t *) pmdp;
 		goto out;
 	} else if (is_hugepd(__hugepd(pmd_val(pmd))))
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index 654000da8b15..43e99e1d947b 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -75,7 +75,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 	 */
 	WARN_ON(pte_val(pmd_pte(*pmdp)) & _PAGE_PRESENT);
 	assert_spin_locked(pmd_lockptr(mm, pmdp));
-	WARN_ON(!(pmd_trans_huge(pmd) || pmd_devmap(pmd)));
+	WARN_ON(!(pmd_large(pmd) || pmd_devmap(pmd)));
 #endif
 	trace_hugepage_set_pmd(addr, pmd_val(pmd));
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 53e9eeecd5d4..e15e63079ba8 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -306,7 +306,7 @@ struct page *pud_page(pud_t pud)
  */
 struct page *pmd_page(pmd_t pmd)
 {
-	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
+	if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
 		return pte_page(pmd_pte(pmd));
 	return virt_to_page(pmd_page_vaddr(pmd));
 }
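The __find_linux_pte() hunk above is the one that affects a lockless walk: a leaf pmd (mapped THP or pmd migration/swap entry) must be returned as the pte, with the shift reported at the pmd level, and never dereferenced as a pointer to a pte table. Below is a minimal, standalone sketch of that decision only. It is not the kernel function: struct toy_pmd, toy_pmd_leaf(), toy_walk_pmd() and TOY_PMD_SHIFT are invented stand-ins for pmd_t, pmd_huge()/pmd_large() and the real walker.

/*
 * Toy sketch (not kernel code) of the pmd-level branch in a lockless
 * walk: return leaf entries, descend only into real pte tables.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_PMD_SHIFT	21	/* illustrative 2MB pmd-level shift */

struct toy_pmd {
	unsigned long val;
	bool leaf;	/* leaf entry: mapped THP or pmd migration entry */
	bool is_thp;	/* true only for a mapped THP */
};

/* Stand-in for the pmd_huge()/pmd_large() leaf test. */
static bool toy_pmd_leaf(struct toy_pmd pmd)
{
	return pmd.leaf;
}

/*
 * Returns the pte to use for this pmd, or NULL when the caller should
 * descend into the pte table instead.
 */
static unsigned long *toy_walk_pmd(struct toy_pmd *pmdp, bool *is_thp,
				   unsigned int *hpage_shift)
{
	if (toy_pmd_leaf(*pmdp)) {
		/*
		 * Leaf at pmd level: covers mapped THPs *and* pmd
		 * migration entries. For the latter we still report
		 * the pmd-level shift, but not is_thp.
		 */
		*is_thp = pmdp->is_thp;
		*hpage_shift = TOY_PMD_SHIFT;
		return &pmdp->val;
	}
	/* Otherwise the pmd points to a pte table; descend. */
	return NULL;
}

int main(void)
{
	struct toy_pmd migration = { .val = 0x1234, .leaf = true, .is_thp = false };
	bool thp = false;
	unsigned int shift = 0;

	unsigned long *pte = toy_walk_pmd(&migration, &thp, &shift);
	printf("pte=%p thp=%d shift=%u\n", (void *)pte, (int)thp, shift);
	return 0;
}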