path: root/arch/arc/mm
author    Vineet Gupta <vgupta@synopsys.com>    2013-07-26 00:45:50 +0200
committer Vineet Gupta <vgupta@synopsys.com>    2013-08-30 18:12:19 +0200
commit    947bf103fcd2defa3bc4b7ebc6b05d0427bcde2d (patch)
tree      549bdf5c9cdd5a9d4aa320bf4fbdf88b499f1f4b /arch/arc/mm
parent    ARC: [ASID] activate_mm() == switch_mm() (diff)
download  linux-947bf103fcd2defa3bc4b7ebc6b05d0427bcde2d.tar.xz
          linux-947bf103fcd2defa3bc4b7ebc6b05d0427bcde2d.zip
ARC: [ASID] Track ASID allocation cycles/generations
This helps remove the asid-to-mm reverse map.

While mm->context.id contains the ASID assigned to a process, our ASID allocator also used an asid_mm_map[] reverse map. In a new allocation cycle (mm->ASID >= @asid_cache), the round-robin ASID allocator used this to check whether the new @asid_cache belonged to some mm2 (from the previous cycle). If so, it could locate that mm via the reverse map and mark its ASID as unallocated, forcing a refresh at the time of switch_mm().

However, for SMP the reverse map has to be maintained per CPU, so it becomes two dimensional; hence it is removed.

With the reverse map gone, it is NOT possible to reach out to the current assignee. So we track the ASID allocation generation/cycle, and on every switch_mm() check whether the current generation of the CPU ASID is the same as the mm's ASID; if not, it is refreshed.

(Based loosely on the arch/sh implementation)

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
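A minimal user-space sketch of the cycle/generation scheme described above, not the kernel code itself: it assumes an 8-bit hardware ASID kept in the low byte of a 32-bit container with the allocation cycle in the upper bits, mirroring the MM_CTXT_ASID_MASK / MM_CTXT_CYCLE_MASK split the patch relies on; struct mm_ctxt, main() and the macro names here are illustrative stand-ins.

#include <stdio.h>

#define ASID_MASK    0x000000ffu          /* h/w PID bits in low byte     */
#define CYCLE_MASK   (~ASID_MASK)         /* allocation cycle/generation  */
#define FIRST_CYCLE  (ASID_MASK + 1)      /* 0x100: first generation      */
#define NO_ASID      0u                   /* context never allocated      */

static unsigned int asid_cache = FIRST_CYCLE;   /* per-CPU in the kernel */

struct mm_ctxt { unsigned int asid; };          /* stand-in for mm->context */

static void get_new_mmu_context(struct mm_ctxt *mm)
{
	/* ASID is from the current generation: nothing to do */
	if (!((mm->asid ^ asid_cache) & CYCLE_MASK))
		return;

	/* round-robin: take next ASID; low-byte wrap starts a new cycle */
	if (!(++asid_cache & ASID_MASK)) {
		/* the kernel would flush_tlb_all() here */
		if (!asid_cache)          /* 32-bit container itself wrapped */
			asid_cache = FIRST_CYCLE;
	}

	mm->asid = asid_cache;            /* cycle + ASID kept in one word */
}

int main(void)
{
	struct mm_ctxt mm = { NO_ASID };

	get_new_mmu_context(&mm);         /* stale generation -> refresh */
	printf("asid=0x%x hw_pid=0x%x\n", mm.asid, mm.asid & ASID_MASK);
	return 0;
}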
Diffstat (limited to 'arch/arc/mm')
-rw-r--r--  arch/arc/mm/tlb.c    22
-rw-r--r--  arch/arc/mm/tlbex.S   5
2 files changed, 11 insertions(+), 16 deletions(-)
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index b5c5e0aa0aaa..71cb26df4255 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -100,13 +100,7 @@
/* A copy of the ASID from the PID reg is kept in asid_cache */
-int asid_cache = FIRST_ASID;
-
-/* ASID to mm struct mapping. We have one extra entry corresponding to
- * NO_ASID to save us a compare when clearing the mm entry for old asid
- * see get_new_mmu_context (asm-arc/mmu_context.h)
- */
-struct mm_struct *asid_mm_map[NUM_ASID + 1];
+unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;
/*
* Utility Routine to erase a J-TLB entry
@@ -281,7 +275,6 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
unsigned long flags;
- unsigned int asid;
/* If range @start to @end is more than 32 TLB entries deep,
* its better to move to a new ASID rather than searching for
@@ -303,11 +296,10 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
start &= PAGE_MASK;
local_irq_save(flags);
- asid = vma->vm_mm->context.asid;
- if (asid != NO_ASID) {
+ if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
while (start < end) {
- tlb_entry_erase(start | (asid & 0xff));
+ tlb_entry_erase(start | hw_pid(vma->vm_mm));
start += PAGE_SIZE;
}
}
@@ -361,9 +353,8 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
*/
local_irq_save(flags);
- if (vma->vm_mm->context.asid != NO_ASID) {
- tlb_entry_erase((page & PAGE_MASK) |
- (vma->vm_mm->context.asid & 0xff));
+ if (vma->vm_mm->context.asid != MM_CTXT_NO_ASID) {
+ tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm));
utlb_invalidate();
}
@@ -709,7 +700,8 @@ void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
* - SW needs to have a valid ASID
*/
if (addr < 0x70000000 &&
- ((mmu_asid != mm_asid) || (mm_asid == NO_ASID)))
+ ((mm_asid == MM_CTXT_NO_ASID) ||
+ (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
print_asid_mismatch(mm_asid, mmu_asid, 0);
}
#endif
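The tlb_paranoid_check() hunk above tightens the sanity test: since the software ASID now also carries the generation in its upper bits, the hardware PID is compared against only the low MM_CTXT_ASID_MASK bits, and a never-allocated context is flagged unconditionally. A standalone sketch of that predicate, assuming the 0xff mask from the patch (asid_mismatch() is a hypothetical name, not the kernel function):

#include <stdbool.h>

#define MM_CTXT_ASID_MASK  0xffu   /* h/w PID bits */
#define MM_CTXT_NO_ASID    0u

/* sketch of the updated check, not the kernel code itself */
static bool asid_mismatch(unsigned int mm_asid, unsigned int mmu_asid)
{
	return mm_asid == MM_CTXT_NO_ASID ||
	       mmu_asid != (mm_asid & MM_CTXT_ASID_MASK);
}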
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 88897a112d55..cf7d7d9ad695 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -140,12 +140,15 @@ ex_saved_reg1:
GET_CURR_TASK_ON_CPU r3
ld r0, [r3, TASK_ACT_MM]
ld r0, [r0, MM_CTXT+MM_CTXT_ASID]
+ breq r0, 0, 55f ; Error if no ASID allocated
lr r1, [ARC_REG_PID]
and r1, r1, 0xFF
- breq r1, r0, 5f
+ and r2, r0, 0xFF ; MMU PID bits only for comparison
+ breq r1, r2, 5f
+55:
; Error if H/w and S/w ASID don't match, but NOT if in kernel mode
lr r2, [erstatus]
bbit0 r2, STATUS_U_BIT, 5f
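Read as C, the updated fast-path check does roughly the following; this is a sketch, and only the 0xff masking, the zero-ASID test, and the kernel-mode exemption come from the hunk above, while the function and parameter names are hypothetical:

#include <stdbool.h>
#include <stdio.h>

/* C restatement of the tlbex.S flow: report a mismatch between the
 * h/w PID and the low byte of the s/w ASID, and a never-allocated
 * ASID, but only when the fault came from user mode (the bbit0 test
 * on STATUS_U_BIT skips the error path for kernel-mode faults).
 */
static void paranoid_fastpath_check(unsigned int sw_asid,
				    unsigned int hw_pid,
				    bool user_mode)
{
	bool bad = (sw_asid == 0) ||                 /* no ASID allocated */
		   ((hw_pid & 0xff) != (sw_asid & 0xff));

	if (bad && user_mode)
		printf("ASID mismatch: hw=0x%x sw=0x%x\n",
		       hw_pid & 0xff, sw_asid & 0xff);
}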