author     Will Deacon <will.deacon@arm.com>          2015-10-06 19:46:24 +0200
committer  Catalin Marinas <catalin.marinas@arm.com>  2015-10-07 12:55:41 +0200
commit     5aec715d7d3122f77cabaa7578d9d25a0c1ed20e (patch)
tree       8d75ae3f1f72bfa8ee77fdea406b6c9dcfaf4e60 /arch/arm64/include/asm
parent     arm64: flush: use local TLB and I-cache invalidation (diff)
download   linux-5aec715d7d3122f77cabaa7578d9d25a0c1ed20e.tar.xz
           linux-5aec715d7d3122f77cabaa7578d9d25a0c1ed20e.zip
arm64: mm: rewrite ASID allocator and MM context-switching code
Our current switch_mm implementation suffers from a number of problems:

  (1) The ASID allocator relies on IPIs to synchronise the CPUs on a
      rollover event

  (2) Because of (1), we cannot allocate ASIDs with interrupts disabled
      and therefore make use of a TIF_SWITCH_MM flag to postpone the
      actual switch to finish_arch_post_lock_switch

  (3) We run context switch with a reserved (invalid) TTBR0 value, even
      though the ASID and pgd are updated atomically

  (4) We take a global spinlock (cpu_asid_lock) during context-switch

  (5) We use h/w broadcast TLB operations when they are not required
      (e.g. in flush_context)

This patch addresses these problems by rewriting the ASID algorithm to
match the bitmap-based arch/arm/ implementation more closely. This in
turn allows us to remove much of the complications surrounding switch_mm,
including the ugly thread flag.

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
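The bitmap-based scheme referred to above pairs each mm with a 64-bit value that combines a generation counter with an ASID, and uses a bitmap to track which ASIDs are taken within the current generation; the slow path only runs when the mm's generation is stale. The following is a minimal user-space sketch of that idea, not the kernel code: the constants, struct and helper names are illustrative, and the per-CPU active-ASID tracking, locking and TLB invalidation that the real allocator needs are omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ASID_BITS	8			/* small for illustration; arm64 uses 16 */
#define NUM_ASIDS	(1u << ASID_BITS)
#define ASID_MASK	(NUM_ASIDS - 1)
#define GENERATION	((uint64_t)NUM_ASIDS)	/* generation counter lives above the ASID bits */

static uint64_t asid_generation = GENERATION;	/* current generation, never zero */
static uint8_t asid_map[NUM_ASIDS];		/* 1 = ASID taken in this generation */

struct mm {
	uint64_t context_id;			/* generation | ASID, 0 = never allocated */
};

/* Slow path: hand out a fresh ASID, rolling the generation over when the map fills up. */
static uint64_t new_context(struct mm *mm)
{
	unsigned int asid;

	for (asid = 1; asid < NUM_ASIDS; asid++) {	/* ASID 0 stays reserved */
		if (!asid_map[asid]) {
			asid_map[asid] = 1;
			return asid_generation | asid;
		}
	}

	/*
	 * Rollover: bump the generation so every previously handed-out id
	 * becomes stale, clear the map and start again from ASID 1. The
	 * real allocator would also flush the local TLB at this point.
	 */
	asid_generation += GENERATION;
	memset(asid_map, 0, sizeof(asid_map));
	asid_map[1] = 1;
	return asid_generation | 1;
}

/* Fast path: keep the mm's ASID if it was allocated in the current generation. */
static void check_and_switch_context(struct mm *mm)
{
	if ((mm->context_id ^ asid_generation) >> ASID_BITS)
		mm->context_id = new_context(mm);

	printf("running with ASID %llu (generation %llu)\n",
	       (unsigned long long)(mm->context_id & ASID_MASK),
	       (unsigned long long)(mm->context_id >> ASID_BITS));
}

int main(void)
{
	struct mm a = { 0 }, b = { 0 };

	check_and_switch_context(&a);	/* allocates ASID 1 */
	check_and_switch_context(&b);	/* allocates ASID 2 */
	check_and_switch_context(&a);	/* same generation: fast path, ASID 1 reused */
	return 0;
}

Because the generation check needs no IPIs and no global spinlock on the fast path, the allocation can safely run with interrupts disabled, which is what lets the patch below drop TIF_SWITCH_MM and the deferred finish_arch_post_lock_switch dance.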
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--  arch/arm64/include/asm/mmu.h            15
-rw-r--r--  arch/arm64/include/asm/mmu_context.h    76
-rw-r--r--  arch/arm64/include/asm/thread_info.h     1
3 files changed, 19 insertions(+), 73 deletions(-)
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 030208767185..990124a67eeb 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -17,15 +17,16 @@
#define __ASM_MMU_H
typedef struct {
- unsigned int id;
- raw_spinlock_t id_lock;
- void *vdso;
+ atomic64_t id;
+ void *vdso;
} mm_context_t;
-#define INIT_MM_CONTEXT(name) \
- .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
-
-#define ASID(mm) ((mm)->context.id & 0xffff)
+/*
+ * This macro is only used by the TLBI code, which cannot race with an
+ * ASID change and therefore doesn't need to reload the counter using
+ * atomic64_read.
+ */
+#define ASID(mm) ((mm)->context.id.counter & 0xffff)
extern void paging_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 549b89554ce8..f4c74a951b6c 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -28,13 +28,6 @@
#include <asm/cputype.h>
#include <asm/pgtable.h>
-#define MAX_ASID_BITS 16
-
-extern unsigned int cpu_last_asid;
-
-void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-void __new_context(struct mm_struct *mm);
-
#ifdef CONFIG_PID_IN_CONTEXTIDR
static inline void contextidr_thread_switch(struct task_struct *next)
{
@@ -96,66 +89,19 @@ static inline void cpu_set_default_tcr_t0sz(void)
: "r"(TCR_T0SZ(VA_BITS)), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH));
}
-static inline void switch_new_context(struct mm_struct *mm)
-{
- unsigned long flags;
-
- __new_context(mm);
-
- local_irq_save(flags);
- cpu_switch_mm(mm->pgd, mm);
- local_irq_restore(flags);
-}
-
-static inline void check_and_switch_context(struct mm_struct *mm,
- struct task_struct *tsk)
-{
- /*
- * Required during context switch to avoid speculative page table
- * walking with the wrong TTBR.
- */
- cpu_set_reserved_ttbr0();
-
- if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
- /*
- * The ASID is from the current generation, just switch to the
- * new pgd. This condition is only true for calls from
- * context_switch() and interrupts are already disabled.
- */
- cpu_switch_mm(mm->pgd, mm);
- else if (irqs_disabled())
- /*
- * Defer the new ASID allocation until after the context
- * switch critical region since __new_context() cannot be
- * called with interrupts disabled.
- */
- set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
- else
- /*
- * That is a direct call to switch_mm() or activate_mm() with
- * interrupts enabled and a new context.
- */
- switch_new_context(mm);
-}
-
-#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
+/*
+ * It would be nice to return ASIDs back to the allocator, but unfortunately
+ * that introduces a race with a generation rollover where we could erroneously
+ * free an ASID allocated in a future generation. We could workaround this by
+ * freeing the ASID from the context of the dying mm (e.g. in arch_exit_mmap),
+ * but we'd then need to make sure that we didn't dirty any TLBs afterwards.
+ * Setting a reserved TTBR0 or EPD0 would work, but it all gets ugly when you
+ * take CPU migration into account.
+ */
#define destroy_context(mm) do { } while(0)
+void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
-#define finish_arch_post_lock_switch \
- finish_arch_post_lock_switch
-static inline void finish_arch_post_lock_switch(void)
-{
- if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
- struct mm_struct *mm = current->mm;
- unsigned long flags;
-
- __new_context(mm);
-
- local_irq_save(flags);
- cpu_switch_mm(mm->pgd, mm);
- local_irq_restore(flags);
- }
-}
+#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
/*
* This is called when "tsk" is about to enter lazy TLB mode.
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h
index dcd06d18a42a..555c6dec5ef2 100644
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
@@ -111,7 +111,6 @@ static inline struct thread_info *current_thread_info(void)
#define TIF_RESTORE_SIGMASK 20
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
-#define TIF_SWITCH_MM 23 /* deferred switch_mm */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)