author     Rik van Riel <riel@surriel.com>        2018-09-26 05:58:42 +0200
committer  Peter Zijlstra <peterz@infradead.org>  2018-10-09 16:51:12 +0200
commit     016c4d92cd16f569c6485ae62b076c1a4b779536
tree       b734bacfc71fe52ac281da272536d5156ec8a252 /arch/x86/include/asm/tlbflush.h
parent     smp,cpumask: introduce on_each_cpu_cond_mask
x86/mm/tlb: Add freed_tables argument to flush_tlb_mm_range
Add an argument to flush_tlb_mm_range to indicate whether page tables
are about to be freed after this TLB flush. This allows for an
optimization of flush_tlb_mm_range to skip CPUs in lazy TLB mode.
No functional changes.
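[ Editorial illustration, not part of the patch: the point of the new
  argument is that a flush after a PTE-only unmap can pass false, while
  a flush issued after page-table pages themselves were freed must pass
  true, because a CPU in lazy TLB mode may still speculatively walk
  those tables. A minimal caller-side sketch, with the teardown helper
  zap_user_range() being hypothetical: ]

/* Hypothetical helper, for illustration only: tears down a user range. */
static void zap_user_range(struct mm_struct *mm, unsigned long start,
			   unsigned long end, bool freed_page_tables)
{
	/* ... clear PTEs; possibly free page-table pages ... */

	/*
	 * freed_tables == false: lazy TLB CPUs may be skipped, since
	 * they flush or switch mm before touching user addresses again.
	 * freed_tables == true: the freed tables could still be walked
	 * speculatively, so every CPU using this mm must flush now.
	 */
	flush_tlb_mm_range(mm, start, end, PAGE_SHIFT, freed_page_tables);
}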
Cc: npiggin@gmail.com
Cc: mingo@kernel.org
Cc: will.deacon@arm.com
Cc: songliubraving@fb.com
Cc: kernel-team@fb.com
Cc: luto@kernel.org
Cc: hpa@zytor.com
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20180926035844.1420-6-riel@surriel.com
Diffstat (limited to 'arch/x86/include/asm/tlbflush.h')
-rw-r--r--  arch/x86/include/asm/tlbflush.h  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index d6c0cd9e9591..1dea9860ce5b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -536,22 +536,24 @@ struct flush_tlb_info {
 
 #define local_flush_tlb() __flush_tlb()
 
-#define flush_tlb_mm(mm)	flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL)
+#define flush_tlb_mm(mm)						\
+		flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
 
 #define flush_tlb_range(vma, start, end)				\
 	flush_tlb_mm_range((vma)->vm_mm, start, end,			\
 			   ((vma)->vm_flags & VM_HUGETLB)		\
 				? huge_page_shift(hstate_vma(vma))	\
-				: PAGE_SHIFT)
+				: PAGE_SHIFT, false)
 
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
-				unsigned long end, unsigned int stride_shift);
+				unsigned long end, unsigned int stride_shift,
+				bool freed_tables);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
 {
-	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT);
+	flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
 }
 
 void native_flush_tlb_others(const struct cpumask *cpumask,
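[ Editorial note: as the changelog says, this patch only plumbs the
  argument through; the lazy TLB optimization it enables lands later in
  the series, which also records the flag in struct flush_tlb_info. A
  rough sketch of that optimization, with the helper name hypothetical
  and the freed_tables field anticipating the follow-up patch: ]

/*
 * Illustrative sketch of the follow-up optimization: decide whether a
 * given CPU must receive the flush IPI for this mm.
 */
static bool cpu_needs_flush(int cpu, const struct flush_tlb_info *info)
{
	/*
	 * A lazy TLB CPU will flush, or switch to another mm, before
	 * it touches user addresses again, so it can normally skip
	 * the IPI. If page tables were freed, however, its paging
	 * structure caches might still reference the freed pages, so
	 * it must be flushed immediately.
	 */
	if (!info->freed_tables && per_cpu(cpu_tlbstate.is_lazy, cpu))
		return false;

	return true;
}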