From ed6a79352cad00e9a49d6e438be40e45107207bf Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 31 Aug 2018 14:46:08 +0200
Subject: asm-generic/tlb, arch: Provide CONFIG_HAVE_MMU_GATHER_PAGE_SIZE

Move the mmu_gather::page_size things into the generic code instead of
PowerPC specific bits.

No change in behavior intended.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Will Deacon
Cc: Andrew Morton
Cc: Andy Lutomirski
Cc: Aneesh Kumar K.V
Cc: Borislav Petkov
Cc: Dave Hansen
Cc: H. Peter Anvin
Cc: Linus Torvalds
Cc: Nick Piggin
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 mm/huge_memory.c | 4 ++--
 mm/hugetlb.c     | 2 +-
 mm/madvise.c     | 2 +-
 mm/memory.c      | 4 ++--
 mm/mmu_gather.c  | 5 +++++
 5 files changed, 11 insertions(+), 6 deletions(-)
(limited to 'mm')

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 404acdcd0455..76b75112a259 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1641,7 +1641,7 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct mm_struct *mm = tlb->mm;
 	bool ret = false;
 
-	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (!ptl)
@@ -1717,7 +1717,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	tlb_remove_check_page_size_change(tlb, HPAGE_PMD_SIZE);
+	tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
 
 	ptl = __pmd_trans_huge_lock(pmd, vma);
 	if (!ptl)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 97b1e0290c66..3fc37a626b52 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3353,7 +3353,7 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	 * This is a hugetlb vma, all the pte entries should point
 	 * to huge page.
 	 */
-	tlb_remove_check_page_size_change(tlb, sz);
+	tlb_change_page_size(tlb, sz);
 	tlb_start_vma(tlb, vma);
 
 	/*
diff --git a/mm/madvise.c b/mm/madvise.c
index 21a7881a2db4..bb3a4554d5d5 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -328,7 +328,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
diff --git a/mm/memory.c b/mm/memory.c
index ab650c21bccd..1aa5c03566f1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -356,7 +356,7 @@ void free_pgd_range(struct mmu_gather *tlb,
 	 * We add page table cache pages with PAGE_SIZE,
 	 * (see pte_free_tlb()), flush the tlb if we need
 	 */
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 	pgd = pgd_offset(tlb->mm, addr);
 	do {
 		next = pgd_addr_end(addr, end);
@@ -1046,7 +1046,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
 	pte_t *pte;
 	swp_entry_t entry;
 
-	tlb_remove_check_page_size_change(tlb, PAGE_SIZE);
+	tlb_change_page_size(tlb, PAGE_SIZE);
 again:
 	init_rss_vec(rss);
 	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
index f2f03c655807..14dfc97155e4 100644
--- a/mm/mmu_gather.c
+++ b/mm/mmu_gather.c
@@ -58,7 +58,9 @@ void arch_tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
 #ifdef CONFIG_HAVE_RCU_TABLE_FREE
 	tlb->batch = NULL;
 #endif
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	tlb->page_size = 0;
+#endif
 
 	__tlb_reset_range(tlb);
 }
@@ -121,7 +123,10 @@ bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_
 	struct mmu_gather_batch *batch;
 
 	VM_BUG_ON(!tlb->end);
+
+#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
 	VM_WARN_ON(tlb->page_size != page_size);
+#endif
 
 	batch = tlb->active;
 	/*
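
For readers unfamiliar with the mmu_gather batching API: the VM_WARN_ON() guarded above encodes the rule that every page queued in a gather batch shares one page size, and that callers announce a size switch through tlb_change_page_size() (formerly tlb_remove_check_page_size_change()) before queueing pages of a different size. The standalone userspace sketch below models only that contract; the model_* names, the printf-based "flush", and the flush-on-size-change policy are illustrative assumptions, not the kernel's actual asm-generic/tlb.h implementation (which lies outside this mm-limited diff).

/*
 * Standalone userspace model of the page-size bookkeeping that this patch
 * moves behind CONFIG_HAVE_MMU_GATHER_PAGE_SIZE.  All model_* names are
 * hypothetical; this is not kernel code.
 */
#include <assert.h>
#include <stdio.h>

#define MODEL_PAGE_SIZE      4096u              /* stands in for PAGE_SIZE */
#define MODEL_HPAGE_PMD_SIZE (2u * 1024 * 1024) /* stands in for HPAGE_PMD_SIZE */

struct model_gather {
	unsigned int page_size; /* 0 = not set yet, like tlb->page_size = 0 */
	unsigned int batched;   /* pages queued for a deferred flush */
};

static void model_flush(struct model_gather *tlb)
{
	if (tlb->batched)
		printf("flush %u page(s) of size %u\n", tlb->batched, tlb->page_size);
	tlb->batched = 0;
}

/* Role of tlb_change_page_size(): drain the batch before mixing sizes. */
static void model_change_page_size(struct model_gather *tlb, unsigned int size)
{
	if (tlb->page_size && tlb->page_size != size)
		model_flush(tlb);
	tlb->page_size = size;
}

/* Role of __tlb_remove_page_size(): queued pages must match the set size. */
static void model_remove_page(struct model_gather *tlb, unsigned int size)
{
	/* mirrors the intent of the VM_WARN_ON() above (assert() is stricter) */
	assert(tlb->page_size == size);
	tlb->batched++;
}

int main(void)
{
	struct model_gather tlb = { 0, 0 };

	model_change_page_size(&tlb, MODEL_PAGE_SIZE);      /* as in zap_pte_range() */
	model_remove_page(&tlb, MODEL_PAGE_SIZE);
	model_remove_page(&tlb, MODEL_PAGE_SIZE);

	model_change_page_size(&tlb, MODEL_HPAGE_PMD_SIZE); /* as in zap_huge_pmd() */
	model_remove_page(&tlb, MODEL_HPAGE_PMD_SIZE);

	model_flush(&tlb);
	return 0;
}

Built with any C compiler, the sketch drains the two 4 KiB pages before accepting the 2 MiB one, which is the size-tracking behavior the new config symbol makes available to generic code rather than keeping it PowerPC-specific.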