author		Will Deacon <will.deacon@arm.com>	2013-12-17 19:17:11 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-12-29 13:46:47 +0100
commit		e1a5848e3398dca135f3ae77fe2e01145f9d8826 (patch)
tree		b022e51a78e1d923c4ba7ddd342ff209df154174 /arch/arm/mm/context.c
parent		ARM: PCI: add legacy IDE IRQ implementation (diff)
ARM: 7924/1: mm: don't bother with reserved ttbr0 when running with LPAE
With the new ASID allocation algorithm, active ASIDs at the time of a
rollover event will be marked as reserved, so active mm_structs can
continue to operate with the same ASID as before. This in turn means
that we don't need to worry about allocating a new ASID to an mm that
is currently active (installed in TTBR0).

Since updating the pgd and ASID is atomic on LPAE systems (by virtue of
the two being fields in the same hardware register), we can dispose of
the reserved TTBR0 and rely on whatever tables we currently have live.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
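The atomicity claim is the crux of the patch: with the LPAE long-descriptor
format, TTBR0 is a single 64-bit register that carries both the translation
table base address and the ASID (bits [55:48] on ARMv7-A), so one 64-bit
register write (MCRR) installs both together. A minimal sketch of the
packing follows; make_ttbr0 and TTBR_ASID_SHIFT are illustrative names,
not the kernel's own helpers:

	#include <stdint.h>

	/* ASID lives in TTBR0[55:48] with the long-descriptor format. */
	#define TTBR_ASID_SHIFT	48

	/*
	 * Illustrative only: pack the pgd physical address and the ASID
	 * into one 64-bit TTBR0 value. Because a single 64-bit write sets
	 * both fields, the page tables and the ASID can never be observed
	 * out of sync -- the property the patch relies on to drop the
	 * reserved TTBR0 on LPAE.
	 */
	static inline uint64_t make_ttbr0(uint64_t pgd_phys, unsigned int asid)
	{
		return pgd_phys | ((uint64_t)asid << TTBR_ASID_SHIFT);
	}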
Diffstat (limited to 'arch/arm/mm/context.c')
-rw-r--r--	arch/arm/mm/context.c	21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 84e6f772e204..3ad0fdaa5cc1 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -78,20 +78,21 @@ void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
#endif

#ifdef CONFIG_ARM_LPAE
-static void cpu_set_reserved_ttbr0(void)
-{
- /*
- * Set TTBR0 to swapper_pg_dir which contains only global entries. The
- * ASID is set to 0.
- */
- cpu_set_ttbr(0, __pa(swapper_pg_dir));
- isb();
-}
+/*
+ * With LPAE, the ASID and page tables are updated atomically, so there is
+ * no need for a reserved set of tables (the active ASID tracking prevents
+ * any issues across a rollover).
+ */
+#define cpu_set_reserved_ttbr0()
#else
static void cpu_set_reserved_ttbr0(void)
{
u32 ttb;
- /* Copy TTBR1 into TTBR0 */
+ /*
+ * Copy TTBR1 into TTBR0.
+ * This points at swapper_pg_dir, which contains only global
+ * entries so any speculative walks are perfectly safe.
+ */
asm volatile(
" mrc p15, 0, %0, c2, c0, 1 @ read TTBR1\n"
" mcr p15, 0, %0, c2, c0, 0 @ set TTBR0\n"