author      Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>   2015-03-30 07:11:04 +0200
committer   Michael Ellerman <mpe@ellerman.id.au>                2015-04-17 03:23:40 +0200
commit      7d6e7f7ffaba4e013c7a0589140431799bc17985
tree        e562ffe3cd9799537221f9277e36787412ef2f40
parent      powerpc/mm/thp: Make page table walk safe against thp split/collapse
powerpc/mm/thp: Return pte address if we find trans_splitting.
For a THP that is marked trans splitting, we now return the pte.
This requires callers to handle the pmd_trans_splitting scenario,
if they care (an illustrative sketch follows the diff below). All the
current callers only look at the pfn or at write_ok, so they don't
need to be updated.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h  12
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c         9
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c         2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c               9
4 files changed, 10 insertions, 22 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 23b724c746e9..ce5610aa124c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -281,11 +281,9 @@ static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
 
 /*
  * If it's present and writable, atomically set dirty and referenced bits and
- * return the PTE, otherwise return 0. If we find a transparent hugepage
- * and if it is marked splitting we return 0;
+ * return the PTE, otherwise return 0.
  */
-static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
-                                                 unsigned int hugepage)
+static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
 {
         pte_t old_pte, new_pte = __pte(0);
 
@@ -301,12 +299,6 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
                         cpu_relax();
                         continue;
                 }
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-                /* If hugepage and is trans splitting return None */
-                if (unlikely(hugepage &&
-                             pmd_trans_splitting(pte_pmd(old_pte))))
-                        return __pte(0);
-#endif
                 /* If pte is not present return None */
                 if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
                         return __pte(0);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 26df3864d85a..0fe9c92e78ed 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -537,20 +537,17 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
         }
         /* if the guest wants write access, see if that is OK */
         if (!writing && hpte_is_writable(r)) {
-                unsigned int hugepage_shift;
                 pte_t *ptep, pte;
                 unsigned long flags;
-
                 /*
                  * We need to protect against page table destruction
-                 * while looking up and updating the pte.
+                 * hugepage split and collapse.
                  */
                 local_irq_save(flags);
                 ptep = find_linux_pte_or_hugepte(current->mm->pgd,
-                                                 hva, &hugepage_shift);
+                                                 hva, NULL);
                 if (ptep) {
-                        pte = kvmppc_read_update_linux_pte(ptep, 1,
-                                                           hugepage_shift);
+                        pte = kvmppc_read_update_linux_pte(ptep, 1);
                         if (pte_write(pte))
                                 write_ok = 1;
                 }
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index f559b25de173..d839f08cb903 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -219,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
                         local_irq_restore(flags);
                         return H_PARAMETER;
                 }
-                pte = kvmppc_read_update_linux_pte(ptep, writing, hpage_shift);
+                pte = kvmppc_read_update_linux_pte(ptep, writing);
                 if (pte_present(pte) && !pte_protnone(pte)) {
                         if (writing && !pte_write(pte))
                                 /* make the actual HPTE be read-only */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a9dbb27ca887..0ce968b00b7c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -1014,12 +1014,11 @@ pte_t *__find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea,
                  * A hugepage collapse is captured by pmd_none, because
                  * it mark the pmd none and do a hpte invalidate.
                  *
-                 * A hugepage split is captured by pmd_trans_splitting
-                 * because we mark the pmd trans splitting and do a
-                 * hpte invalidate
-                 *
+                 * We don't worry about pmd_trans_splitting here, The
+                 * caller if it needs to handle the splitting case
+                 * should check for that.
                  */
-                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
+                if (pmd_none(pmd))
                         return NULL;
 
                 if (pmd_huge(pmd) || pmd_large(pmd)) {
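
Illustrative sketch (not part of this commit): what "callers handle the
pmd_trans_splitting scenario" could look like after this change. The helper
name lookup_pte_skip_splitting is made up for this example, and the headers
assumed are the pre-split-rework powerpc ones where pmd_trans_splitting() and
find_linux_pte_or_hugepte() exist. As with the in-tree callers, the caller is
expected to hold local_irq_save() across the walk and the use of the returned
pte so the entry cannot be split or collapsed underneath it.

#include <linux/mm.h>
#include <asm/pgtable.h>

/*
 * Hypothetical caller that does care about a THP in the middle of a
 * split. find_linux_pte_or_hugepte() now returns the pte even when the
 * pmd is marked trans splitting, so the check has to be done here.
 */
static pte_t *lookup_pte_skip_splitting(struct mm_struct *mm, unsigned long addr)
{
        unsigned int shift = 0;
        pte_t *ptep;

        ptep = find_linux_pte_or_hugepte(mm->pgd, addr, &shift);
        if (!ptep)
                return NULL;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        /* A huge page entry that is mid-split is not usable yet. */
        if (shift && pmd_trans_splitting(pte_pmd(*ptep)))
                return NULL;
#endif
        return ptep;
}

The callers touched by this diff skip any such check because, as the commit
message notes, they only consume the pfn or the write_ok result.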