author     Alexandre Ghiti <alexghiti@rivosinc.com>   2023-10-30 14:30:25 +0100
committer  Palmer Dabbelt <palmer@rivosinc.com>       2023-11-06 16:20:49 +0100
commit     c5e9b2c2ae82231d85d9650854e7b3e97dde33da
tree       bfb3fa46a8ba4f983d8489127d2613643854fead /arch
parent     Linux 6.6-rc1
riscv: Improve tlb_flush()
For now, tlb_flush() simply calls flush_tlb_mm(), which results in a
flush of the whole TLB. So let's use the mmu_gather fields to provide a
more fine-grained flush of the TLB.
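
To illustrate the idea, here is a minimal standalone sketch of the
decision the patch makes. The struct and helper below (mock_gather,
pick_flush) are made up for illustration only; they merely mirror the
mmu_gather fields the patch relies on (fullmm, need_flush_all, start,
end) and are not kernel code.

/* Standalone sketch: full flush vs. ranged flush, based on gathered state. */
#include <stdbool.h>
#include <stdio.h>

struct mock_gather {
	bool fullmm;          /* whole address space is being torn down */
	bool need_flush_all;  /* caller explicitly asked for a full flush */
	unsigned long start;  /* lowest address gathered for unmapping */
	unsigned long end;    /* highest address gathered for unmapping */
};

static void pick_flush(const struct mock_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all)
		printf("flush the whole TLB for this mm\n");
	else
		printf("flush only [%#lx, %#lx)\n", tlb->start, tlb->end);
}

int main(void)
{
	/* e.g. munmap() of three pages: only that range needs flushing */
	struct mock_gather unmap_3_pages = {
		.start = 0x7f0000000000UL, .end = 0x7f0000003000UL,
	};
	/* e.g. process exit: the whole mm goes away */
	struct mock_gather exit_mm = { .fullmm = true };

	pick_flush(&unmap_3_pages);
	pick_flush(&exit_mm);
	return 0;
}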
Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
Reviewed-by: Samuel Holland <samuel.holland@sifive.com>
Tested-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com> # On RZ/Five SMARC
Link: https://lore.kernel.org/r/20231030133027.19542-2-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Diffstat (limited to 'arch')
 arch/riscv/include/asm/tlb.h      | 8
 arch/riscv/include/asm/tlbflush.h | 3
 arch/riscv/mm/tlbflush.c          | 7
 3 files changed, 17 insertions(+), 1 deletion(-)
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 120bcf2ed8a8..1eb5682b2af6 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	flush_tlb_mm(tlb->mm);
+#ifdef CONFIG_MMU
+	if (tlb->fullmm || tlb->need_flush_all)
+		flush_tlb_mm(tlb->mm);
+	else
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+				   tlb_get_unmap_size(tlb));
+#endif
 }
 
 #endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a09196f8de68..f5c4fb0ae642 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end, unsigned int page_size);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
@@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 }
 
 #define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
 #endif /* !CONFIG_SMP || !CONFIG_MMU */
 
 /* Flush a range of kernel pages */
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 77be59aadc73..fa03289853d8 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm)
 	__flush_tlb_range(mm, 0, -1, PAGE_SIZE);
 }
 
+void flush_tlb_mm_range(struct mm_struct *mm,
+			unsigned long start, unsigned long end,
+			unsigned int page_size)
+{
+	__flush_tlb_range(mm, start, end - start, page_size);
+}
+
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
 	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);