From 18594f9b8c45484bd527ebc6b08383b95f58ba73 Mon Sep 17 00:00:00 2001
From: Nicholas Piggin <npiggin@gmail.com>
Date: Mon, 4 May 2020 22:29:07 +1000
Subject: powerpc/64s/radix: Don't prefetch DAR in update_mmu_cache

The idea behind this prefetch was to kick off a page table walk before
returning from the fault, getting some pipelining advantage.

But this never showed any noticeable performance advantage, and in fact
with KUAP the prefetches are actually blocked and cause some kind of
micro-architectural fault.

Removing this improves page fault microbenchmark performance by about
9%.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Keep the early return in update_mmu_cache()]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200504122907.49304-1-npiggin@gmail.com
---
 arch/powerpc/include/asm/book3s/64/pgtable.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'arch/powerpc/include')

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index ec17fc343be0..c4b77fa0b9ad 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -1159,8 +1159,11 @@ extern pmd_t mk_pmd(struct page *page, pgprot_t pgprot);
 extern pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot);
 extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		       pmd_t *pmdp, pmd_t pmd);
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-				 pmd_t *pmd);
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+					unsigned long addr, pmd_t *pmd)
+{
+}
+
 extern int hash__has_transparent_hugepage(void);
 static inline int has_transparent_hugepage(void)
 {
--
cgit v1.2.3