path: root/arch/arm64/include/asm/tlbflush.h
author     Will Deacon <will.deacon@arm.com>            2018-08-22 22:40:30 +0200
committer  Catalin Marinas <catalin.marinas@arm.com>    2018-09-11 17:49:10 +0200
commit     45a284bc5ee3d629b6da1498c2273cb22361416e (patch)
tree       bc6dd47bd4a105654b6fd8821e513cf22b7e698e /arch/arm64/include/asm/tlbflush.h
parent     arm64: tlb: Use last-level invalidation in flush_tlb_kernel_range() (diff)
download   linux-45a284bc5ee3d629b6da1498c2273cb22361416e.tar.xz
           linux-45a284bc5ee3d629b6da1498c2273cb22361416e.zip
arm64: tlb: Add DSB ISHST prior to TLBI in __flush_tlb_[kernel_]pgtable()
__flush_tlb_[kernel_]pgtable() rely on set_pXd() having a DSB after writing the
new table entry and therefore avoid the barrier prior to the TLBI instruction.

In preparation for delaying our walk-cache invalidation on the unmap() path,
move the DSB into the TLB invalidation routines.

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
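For illustration, a minimal sketch of the ordering the added barrier provides;
the set_pmd()/pmdp/pmd/addr names stand in for whichever page-table update
precedes the flush and are not taken from this patch:

	set_pmd(pmdp, pmd);	/* write the new table entry                    */
	dsb(ishst);		/* make the store visible to other CPUs' table  */
				/* walkers before the invalidation is issued    */
	__tlbi(vae1is, addr);	/* broadcast-invalidate the stale entry         */
	dsb(ish);		/* wait for the invalidation to complete        */

Until now the dsb(ishst) was assumed to come from set_pXd() itself; after this
patch it is issued inside __flush_tlb_[kernel_]pgtable(), so the flush routines
no longer depend on the table update providing it.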
Diffstat (limited to 'arch/arm64/include/asm/tlbflush.h')
-rw-r--r--  arch/arm64/include/asm/tlbflush.h | 2 ++
1 file changed, 2 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index 7e2a35424ca4..e257f8655b84 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -213,6 +213,7 @@ static inline void __flush_tlb_pgtable(struct mm_struct *mm,
 {
 	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));
 
+	dsb(ishst);
 	__tlbi(vae1is, addr);
 	__tlbi_user(vae1is, addr);
 	dsb(ish);
@@ -222,6 +223,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
 {
 	unsigned long addr = __TLBI_VADDR(kaddr, 0);
 
+	dsb(ishst);
 	__tlbi(vaae1is, addr);
 	dsb(ish);
 }
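For reference, a sketch of how the two helpers read once this patch is applied,
reconstructed from the hunks above; the continuation line of the
__flush_tlb_pgtable() prototype ("unsigned long uaddr") is inferred from the use
of uaddr in the body rather than shown in the hunk context:

static inline void __flush_tlb_pgtable(struct mm_struct *mm,
				       unsigned long uaddr)
{
	unsigned long addr = __TLBI_VADDR(uaddr, ASID(mm));

	dsb(ishst);		/* order the page-table update before the TLBI */
	__tlbi(vae1is, addr);
	__tlbi_user(vae1is, addr);
	dsb(ish);		/* complete the invalidation before returning  */
}

static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
{
	unsigned long addr = __TLBI_VADDR(kaddr, 0);

	dsb(ishst);
	__tlbi(vaae1is, addr);
	dsb(ish);
}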