author    | Vineet Gupta <vgupta@synopsys.com> | 2013-07-24 22:53:45 +0200
committer | Vineet Gupta <vgupta@synopsys.com> | 2013-08-30 18:12:18 +0200
commit    | 3daa48d1d9bc44baa079d65e72ef2e3f1139ac03 (patch)
tree      | 2e659049c5ee5584c789cc42643aa653cfdb307d /arch/arc
parent    | ARC: [ASID] Refactor the TLB paranoid debug code (diff)
ARC: [ASID] get_new_mmu_context() to conditionally allocate new ASID
ASID allocation changes/1
This patch does 2 things:
(1) get_new_mmu_context() now moves mm->ASID to a new value ONLY if it
    was from a previous allocation cycle/generation OR if mm had no ASID
    allocated at all (previously it would unconditionally move to a new ASID).
    Callers that want an unconditional ASID update, e.g. local_flush_tlb_mm()
    (invalidating the parent's address space at fork), must first force the
    parent onto an unallocated ASID; see the sketch after this list.
(2) get_new_mmu_context() always programs the MMU PID register with the
    ASID value, whether it was kept or newly allocated.
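For point (1), the caller-side pattern for forcing a fresh ASID looks roughly as follows. This is a minimal sketch distilled from the local_flush_tlb_mm() hunk in the diff further down; the helper name force_new_asid() is hypothetical and not part of the patch:

```c
/*
 * Sketch (hypothetical helper, not in the patch): force a fresh ASID
 * under the new scheme.  destroy_context() marks mm's ASID as
 * unallocated, so a following get_new_mmu_context() must allocate anew.
 */
static void force_new_asid(struct mm_struct *mm)
{
	destroy_context(mm);            /* drop whatever ASID mm currently holds */
	if (current->mm == mm)          /* re-program h/w only for the live mm   */
		get_new_mmu_context(mm);
}
```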
The gains are:
- all ASID allocation logic is consolidated into get_new_mmu_context()
- no more code duplication in switch_mm() for setting the PID register
- enables a future change to fold activate_mm() into switch_mm()
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
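Putting both points together, the reworked allocator roughly takes the shape below. This is a sketch assembled from the mmu_context.h hunks in the diff that follows, not the literal kernel source; the slot-relinquish and ASID roll-over/flush details are elided:

```c
/* Sketch of get_new_mmu_context() after this patch (simplified). */
static inline void get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long flags;

	local_irq_save(flags);

	/* ASID is from the current allocation cycle: nothing to allocate */
	if (mm->context.asid <= asid_cache)
		goto set_hw;

	/* ... relinquish the old slot, handle roll-over, grab a new slot ... */
	asid_mm_map[asid_cache] = mm;
	mm->context.asid = asid_cache;

set_hw:
	/* Always (re)program the MMU PID register, changed ASID or not */
	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);

	local_irq_restore(flags);
}
```

With this in place, switch_mm() can simply call get_new_mmu_context(next) and drop its own PID-register write, which is what the switch_mm() hunk below removes.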
Diffstat (limited to 'arch/arc')
-rw-r--r-- | arch/arc/include/asm/mmu_context.h | 45 |
-rw-r--r-- | arch/arc/mm/tlb.c | 13 |
2 files changed, 25 insertions, 33 deletions
diff --git a/arch/arc/include/asm/mmu_context.h b/arch/arc/include/asm/mmu_context.h
index a63800fd1dba..7a3ecd25ffc9 100644
--- a/arch/arc/include/asm/mmu_context.h
+++ b/arch/arc/include/asm/mmu_context.h
@@ -69,8 +69,8 @@ extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
 extern int asid_cache;
 
 /*
- * Assign a new ASID to task. If the task already has an ASID, it is
- * relinquished.
+ * Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
+ * Also set the MMU PID register to existing/updated ASID
  */
 static inline void get_new_mmu_context(struct mm_struct *mm)
 {
@@ -80,6 +80,17 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	local_irq_save(flags);
 
 	/*
+	 * Move to new ASID if it was not from current alloc-cycle/generation.
+	 *
+	 * Note: Callers needing new ASID unconditionally, independent of
+	 *       generation, e.g. local_flush_tlb_mm() for forking parent,
+	 *       first need to destroy the context, setting it to invalid
+	 *       value.
+	 */
+	if (mm->context.asid <= asid_cache)
+		goto set_hw;
+
+	/*
 	 * Relinquish the currently owned ASID (if any).
 	 * Doing unconditionally saves a cmp-n-branch; for already unused
 	 * ASID slot, the value was/remains NULL
@@ -99,9 +110,9 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	 * task with ASID from prev allocation cycle (before ASID roll-over).
 	 *
 	 * This might look wrong - if we are re-using some other task's ASID,
-	 * won't we use it's stale TLB entries too. Actually switch_mm( ) takes
+	 * won't we use it's stale TLB entries too. Actually the algorithm takes
 	 * care of such a case: it ensures that task with ASID from prev alloc
-	 * cycle, when scheduled will refresh it's ASID: see switch_mm( ) below
+	 * cycle, when scheduled will refresh it's ASID
 	 * The stealing scenario described here will only happen if that task
 	 * didn't get a chance to refresh it's ASID - implying stale entries
 	 * won't exist.
@@ -114,7 +125,8 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
 	asid_mm_map[asid_cache] = mm;
 	mm->context.asid = asid_cache;
 
-	write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
+set_hw:
+	write_aux_reg(ARC_REG_PID, mm->context.asid | MMU_ENABLE);
 
 	local_irq_restore(flags);
 }
@@ -141,28 +153,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
 #endif
 
-	/*
-	 * Get a new ASID if task doesn't have a valid one. Possible when
-	 * -task never had an ASID (fresh after fork)
-	 * -it's ASID was stolen - past an ASID roll-over.
-	 * -There's a third obscure scenario (if this task is running for the
-	 *  first time afer an ASID rollover), where despite having a valid
-	 *  ASID, we force a get for new ASID - see comments at top.
-	 *
-	 * Both the non-alloc scenario and first-use-after-rollover can be
-	 * detected using the single condition below: NO_ASID = 256
-	 * while asid_cache is always a valid ASID value (0-255).
-	 */
-	if (next->context.asid > asid_cache) {
-		get_new_mmu_context(next);
-	} else {
-		/*
-		 * XXX: This will never happen given the chks above
-		 * BUG_ON(next->context.asid > MAX_ASID);
-		 */
-		write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
-	}
-
+	get_new_mmu_context(next);
 }
 
 static inline void destroy_context(struct mm_struct *mm)
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index a4ad68c4b50d..b5c5e0aa0aaa 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -258,13 +258,14 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
 		return;
 
 	/*
-	 * Workaround for Android weirdism:
-	 * A binder VMA could end up in a task such that vma->mm != tsk->mm
-	 * old code would cause h/w - s/w ASID to get out of sync
+	 * - Move to a new ASID, but only if the mm is still wired in
+	 *   (Android Binder ended up calling this for vma->mm != tsk->mm,
+	 *    causing h/w - s/w ASID to get out of sync)
+	 * - Also get_new_mmu_context() new implementation allocates a new
+	 *   ASID only if it is not allocated already - so unallocate first
 	 */
-	if (current->mm != mm)
-		destroy_context(mm);
-	else
+	destroy_context(mm);
+	if (current->mm == mm)
 		get_new_mmu_context(mm);
 }