author		Matthew Wilcox (Oracle) <willy@infradead.org>	2023-08-02 17:13:54 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-08-25 01:20:24 +0200
commit		1a10a44dfc1d55ba84987da1f8377258a044499c
tree		f49ad7b54a5b609ec76572a0bfcbd76d38045cd0 /arch/sparc/include
parent		sparc32: implement the new page table range API
sparc64: implement the new page table range API
Add set_ptes(), update_mmu_cache_range(), flush_dcache_folio() and
flush_icache_pages(). Convert the PG_dcache_dirty flag from being
per-page to per-folio.
Link: https://lkml.kernel.org/r/20230802151406.3735276-27-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'arch/sparc/include')
-rw-r--r--	arch/sparc/include/asm/cacheflush_64.h	| 18
-rw-r--r--	arch/sparc/include/asm/pgtable_64.h	| 29
2 files changed, 34 insertions, 13 deletions
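
Before the diff itself, a minimal sketch (not part of this commit) of how a generic caller would use the range API that this patch implements for sparc64. The helper name map_folio_pages() and its argument list are illustrative only; set_ptes(), update_mmu_cache_range(), folio_nr_pages() and mk_pte() are the real interfaces being exercised:

	/*
	 * Illustrative sketch only, assuming the caller already holds the
	 * page table lock and ptep points at the PTE for the folio's first
	 * page.  One set_ptes() call installs nr consecutive PTEs; the
	 * sparc64 implementation below steps pte_val() by PAGE_SIZE for
	 * each entry.
	 */
	static void map_folio_pages(struct vm_area_struct *vma, unsigned long addr,
				    pte_t *ptep, struct folio *folio, pgprot_t prot)
	{
		unsigned int nr = folio_nr_pages(folio);
		pte_t pte = mk_pte(&folio->page, prot);

		set_ptes(vma->vm_mm, addr, ptep, pte, nr);

		/* One architecture hook for the whole range replaces nr
		 * separate update_mmu_cache() calls. */
		update_mmu_cache_range(NULL, vma, addr, ptep, nr);
	}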
diff --git a/arch/sparc/include/asm/cacheflush_64.h b/arch/sparc/include/asm/cacheflush_64.h
index b9341836597e..a9a719f04d06 100644
--- a/arch/sparc/include/asm/cacheflush_64.h
+++ b/arch/sparc/include/asm/cacheflush_64.h
@@ -35,20 +35,26 @@ void flush_icache_range(unsigned long start, unsigned long end);
 void __flush_icache_page(unsigned long);
 
 void __flush_dcache_page(void *addr, int flush_icache);
-void flush_dcache_page_impl(struct page *page);
+void flush_dcache_folio_impl(struct folio *folio);
 #ifdef CONFIG_SMP
-void smp_flush_dcache_page_impl(struct page *page, int cpu);
-void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
+void smp_flush_dcache_folio_impl(struct folio *folio, int cpu);
+void flush_dcache_folio_all(struct mm_struct *mm, struct folio *folio);
 #else
-#define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
-#define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
+#define smp_flush_dcache_folio_impl(folio, cpu) flush_dcache_folio_impl(folio)
+#define flush_dcache_folio_all(mm, folio) flush_dcache_folio_impl(folio)
 #endif
 
 void __flush_dcache_range(unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-void flush_dcache_page(struct page *page);
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
+static inline void flush_dcache_page(struct page *page)
+{
+	flush_dcache_folio(page_folio(page));
+}
 
 #define flush_icache_page(vma, pg)	do { } while(0)
+#define flush_icache_pages(vma, pg, nr)	do { } while(0)
 
 void flush_ptrace_access(struct vm_area_struct *, struct page *,
 			 unsigned long uaddr, void *kaddr,
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 5563efa1a19f..09aa37cc4469 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -86,6 +86,7 @@ extern unsigned long VMALLOC_END;
 #define vmemmap			((struct page *)VMEMMAP_BASE)
 
 #include <linux/sched.h>
+#include <asm/tlbflush.h>
 
 bool kern_addr_valid(unsigned long addr);
 
@@ -927,8 +928,21 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 	maybe_tlb_batch_add(mm, addr, ptep, orig, fullmm, PAGE_SHIFT);
 }
 
-#define set_pte_at(mm,addr,ptep,pte)	\
-	__set_pte_at((mm), (addr), (ptep), (pte), 0)
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+		pte_t *ptep, pte_t pte, unsigned int nr)
+{
+	arch_enter_lazy_mmu_mode();
+	for (;;) {
+		__set_pte_at(mm, addr, ptep, pte, 0);
+		if (--nr == 0)
+			break;
+		ptep++;
+		pte_val(pte) += PAGE_SIZE;
+		addr += PAGE_SIZE;
+	}
+	arch_leave_lazy_mmu_mode();
+}
+#define set_ptes set_ptes
 
 #define pte_clear(mm,addr,ptep)		\
 	set_pte_at((mm), (addr), (ptep), __pte(0UL))
@@ -947,8 +961,8 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 									\
 	if (pfn_valid(this_pfn) &&					\
 	    (((old_addr) ^ (new_addr)) & (1 << 13)))			\
-		flush_dcache_page_all(current->mm,			\
-				      pfn_to_page(this_pfn));		\
+		flush_dcache_folio_all(current->mm,			\
+			page_folio(pfn_to_page(this_pfn)));		\
 	}								\
 	newpte;								\
 })
@@ -963,7 +977,10 @@ struct seq_file;
 void mmu_info(struct seq_file *);
 
 struct vm_area_struct;
-void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+void update_mmu_cache_range(struct vm_fault *, struct vm_area_struct *,
+		unsigned long addr, pte_t *ptep, unsigned int nr);
+#define update_mmu_cache(vma, addr, ptep) \
+	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void update_mmu_cache_pmd(struct vm_area_struct *vma,
 			  unsigned long addr, pmd_t *pmd);
@@ -1121,8 +1138,6 @@ static inline bool pte_access_permitted(pte_t pte, bool write)
 }
 #define pte_access_permitted pte_access_permitted
 
-#include <asm/tlbflush.h>
-
 /* We provide our own get_unmapped_area to cope with VA holes and
  * SHM area cache aliasing for userland.
  */
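
A similarly hedged note on the cache-flushing side: because flush_dcache_page() now forwards to flush_dcache_folio(), code that still works in terms of struct page gets whole-folio flushing automatically. The function name below is made up for illustration; page_folio() and flush_dcache_folio() are the real interfaces:

	/*
	 * Illustrative only: after this patch, flushing via either entry
	 * point covers every page of a large folio in one call, and the
	 * PG_dcache_dirty state is tracked per folio rather than per page.
	 */
	static void example_finish_write(struct page *page)
	{
		struct folio *folio = page_folio(page);

		flush_dcache_folio(folio);
	}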