author		Will Deacon <will.deacon@arm.com>	2015-10-06 19:46:23 +0200
committer	Catalin Marinas <catalin.marinas@arm.com>	2015-10-07 12:45:27 +0200
commit		8e63d38876691756f9bc6930850f1fb77809be1b (patch)
tree		74f5a739675a90c71d1f07e937c2e3f512781b7e /arch/arm64/mm
parent		arm64: proc: de-scope TLBI operation during cold boot (diff)
arm64: flush: use local TLB and I-cache invalidation
There are a number of places where a single CPU is running with a private page table and we need to perform maintenance on the TLB and I-cache to ensure correctness, but do not require the operation to be broadcast to other CPUs.

This patch adds local variants of flush_tlb_all and __flush_icache_all to support these use cases, and updates the callers accordingly. __local_flush_icache_all() also implies an isb, since it is intended to be used synchronously.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: David Daney <david.daney@cavium.com>
Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
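The local variants themselves are added in header files, outside the arch/arm64/mm diffstat shown below. As a sketch of the distinction the message describes, with the caveat that the exact instruction and barrier choices here are illustrative rather than quoted from the patch: the broadcast flush uses inner-shareable operations, while the local flush confines the TLBI and its barriers to the executing CPU.

/*
 * Sketch: broadcast vs. local TLB invalidation on arm64.
 * dsb()/isb() are the barrier macros from <asm/barrier.h>.
 */
static inline void flush_tlb_all(void)
{
	dsb(ishst);		/* order prior page-table updates, inner-shareable */
	asm("tlbi vmalle1is");	/* invalidate all EL1 TLB entries on all CPUs */
	dsb(ish);		/* wait for completion across the shareability domain */
	isb();
}

static inline void local_flush_tlb_all(void)
{
	dsb(nshst);		/* non-shareable: order against this CPU only */
	asm("tlbi vmalle1");	/* invalidate all EL1 TLB entries on this CPU */
	dsb(nsh);
	isb();
}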
Diffstat (limited to 'arch/arm64/mm')
-rw-r--r--	arch/arm64/mm/context.c	4
-rw-r--r--	arch/arm64/mm/mmu.c	2
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index d70ff14dbdbd..48b53fb381af 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -48,9 +48,9 @@ static void flush_context(void)
 {
 	/* set the reserved TTBR0 before flushing the TLB */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	if (icache_is_aivivt())
-		__flush_icache_all();
+		__local_flush_icache_all();
 }

 static void set_mm_context(struct mm_struct *mm, unsigned int asid)
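An AIVIVT I-cache is tagged by virtual address and ASID, so stale lines must be invalidated when the ASID space is recycled; this rollover path runs on the CPU that hits it, which is why the patch can use the local variant. A sketch of the corresponding I-cache pair, under the same caveat that the exact instructions are illustrative; per the commit message, the local variant ends with an isb because it is used synchronously:

/* Sketch: broadcast vs. local I-cache invalidation. */
static inline void __flush_icache_all(void)
{
	asm("ic ialluis");	/* invalidate all I-caches, inner-shareable */
	dsb(ish);
}

static inline void __local_flush_icache_all(void)
{
	asm("ic iallu");	/* invalidate this CPU's I-cache only */
	dsb(nsh);
	isb();			/* implied isb, as noted in the commit message */
}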
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 9211b8527f25..71a310478c9e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -456,7 +456,7 @@ void __init paging_init(void)
 	 * point to zero page to avoid speculatively fetching new entries.
 	 */
 	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
+	local_flush_tlb_all();
 	cpu_set_default_tcr_t0sz();
 }
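For context, cpu_set_reserved_ttbr0() (also defined outside this diffstat) is what makes the purely local flush safe here: with TTBR0 parked on the zero page, the MMU cannot speculatively fetch new user-space translations while the TLB is being invalidated. A sketch, again illustrative rather than quoted from the tree:

/*
 * Sketch: point TTBR0_EL1 at the always-empty zero page so no new
 * user-space translations can be fetched while the TLB is flushed.
 */
static inline void cpu_set_reserved_ttbr0(void)
{
	unsigned long ttbr = page_to_phys(empty_zero_page);

	asm("msr ttbr0_el1, %0\n"
	    "	isb"		/* synchronize the TTBR0 change */
	    :
	    : "r" (ttbr));
}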