author      Kefeng Wang <wangkefeng.wang@huawei.com>    2024-09-23 15:13:50 +0200
committer   Catalin Marinas <catalin.marinas@arm.com>   2024-10-16 13:01:53 +0200
commit      7ffc13e233951f15728c9d09db3cc8d9f6cf81f2
tree        6998ed1b5355af319498a0a2c392f9fa00ddcf74 /arch/arm64/include/asm/tlbflush.h
parent      Linux 6.12-rc1
arm64: tlbflush: add __flush_tlb_range_limit_excess()
The __flush_tlb_range_limit_excess() helper will be used by the TLB
kernel-range flush path in a subsequent patch.
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Link: https://lore.kernel.org/r/20240923131351.713304-2-wangkefeng.wang@huawei.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
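For context, below is a sketch of how a follow-up change might wire the new helper (defined in the diff further down) into flush_tlb_kernel_range(). The follow-up itself is not part of this commit, so the exact body shown here — the vaale1is range op, the TLBI_TTL_UNKNOWN hint, the flush_tlb_all() fallback — is an assumption based on the existing arm64 TLB-flush code, not a quote of merged code:

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	const unsigned long stride = PAGE_SIZE;
	unsigned long pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/* Too costly to flush page by page (or beyond range-op reach)? */
	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_all();
		return;
	}

	dsb(ishst);
	/* Kernel mappings carry no ASID, hence the vaale1is variant. */
	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
	dsb(ish);
	isb();
}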
Diffstat
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 27 ++++++++++++++++++---------
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 95fbc8c05607..5f5e7d1f2e7d 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -431,6 +431,23 @@ do {									\
 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level)	\
 	__flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
 
+static inline bool __flush_tlb_range_limit_excess(unsigned long start,
+		unsigned long end, unsigned long pages, unsigned long stride)
+{
+	/*
+	 * When the system does not support TLB range based flush
+	 * operation, (MAX_DVM_OPS - 1) pages can be handled. But
+	 * with TLB range based operation, MAX_TLBI_RANGE_PAGES
+	 * pages can be handled.
+	 */
+	if ((!system_supports_tlb_range() &&
+	     (end - start) >= (MAX_DVM_OPS * stride)) ||
+	    pages > MAX_TLBI_RANGE_PAGES)
+		return true;
+
+	return false;
+}
+
 static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 				     unsigned long start, unsigned long end,
 				     unsigned long stride, bool last_level,
@@ -442,15 +459,7 @@ static inline void __flush_tlb_range_nosync(struct vm_area_struct *vma,
 	end = round_up(end, stride);
 	pages = (end - start) >> PAGE_SHIFT;
 
-	/*
-	 * When not uses TLB range ops, we can handle up to
-	 * (MAX_DVM_OPS - 1) pages;
-	 * When uses TLB range ops, we can handle up to
-	 * MAX_TLBI_RANGE_PAGES pages.
-	 */
-	if ((!system_supports_tlb_range() &&
-	     (end - start) >= (MAX_DVM_OPS * stride)) ||
-	    pages > MAX_TLBI_RANGE_PAGES) {
+	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
 	}
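To make the two limits concrete, here is a small standalone sketch of the same predicate with assumed stand-in values for MAX_DVM_OPS and MAX_TLBI_RANGE_PAGES (the real constants are derived in the arm64 headers and depend on the kernel configuration). A 16 MiB flush overflows the assumed per-page TLBI budget but fits easily in one range-based operation:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in values for illustration only. */
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MAX_DVM_OPS		512UL		/* assumed per-page TLBI budget */
#define MAX_TLBI_RANGE_PAGES	(32UL << 16)	/* assumed range-op encoding limit */

/* Same shape as the kernel predicate added by this patch. */
static bool limit_excess(unsigned long start, unsigned long end,
			 unsigned long pages, unsigned long stride,
			 bool has_range_ops)
{
	if ((!has_range_ops && (end - start) >= (MAX_DVM_OPS * stride)) ||
	    pages > MAX_TLBI_RANGE_PAGES)
		return true;

	return false;
}

int main(void)
{
	unsigned long start = 0, end = 4096 * PAGE_SIZE;	/* 16 MiB */
	unsigned long pages = (end - start) >> PAGE_SHIFT;

	/* 4096 individual TLBIs blow the DVM budget -> full flush instead. */
	printf("without range ops: excess=%d\n",
	       limit_excess(start, end, pages, PAGE_SIZE, false));
	/* One range-based TLBI covers up to MAX_TLBI_RANGE_PAGES pages. */
	printf("with range ops:    excess=%d\n",
	       limit_excess(start, end, pages, PAGE_SIZE, true));
	return 0;
}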