path: root/arch/mips/lib
author	Paul Burton <paul.burton@mips.com>	2019-02-02 02:43:28 +0100
committer	Paul Burton <paul.burton@mips.com>	2019-02-04 19:56:41 +0100
commit	c8790d657b0a8d42801fb4536f6f106b4b6306e8 (patch)
tree	3452c36f3e620b6f151b35f6d23f217aaa1154ba /arch/mips/lib
parent	MIPS: Add GINVT instruction helpers (diff)
MIPS: MemoryMapID (MMID) Support
Introduce support for using MemoryMapIDs (MMIDs) as an alternative to Address Space IDs (ASIDs). The major difference between the two is that MMIDs are global - ie. an MMID uniquely identifies an address space across all coherent CPUs. In contrast ASIDs are non-global per-CPU IDs, wherein each address space is allocated a separate ASID for each CPU upon which it is used. This global namespace allows a new GINVT instruction to be used to globally invalidate TLB entries associated with a particular MMID across all coherent CPUs in the system, removing the need for IPIs to invalidate entries with separate ASIDs on each CPU.

The allocation scheme used here is largely borrowed from arm64 (see arch/arm64/mm/context.c). In essence we maintain a bitmap to track available MMIDs, and MMIDs in active use at the time of a rollover to a new MMID version are preserved in the new version. The allocation scheme requires efficient 64 bit atomics in order to perform reasonably, so this support depends upon CONFIG_GENERIC_ATOMIC64=n (ie. currently it will only be included in MIPS64 kernels).

The first, and currently only, available CPU with support for MMIDs is the MIPS I6500. This CPU supports 16 bit MMIDs, and so for now we cap our MMIDs to 16 bits wide in order to prevent the bitmap growing to absurd sizes if any future CPU does implement the 32 bit MMIDs that the architecture manuals suggest are recommended.

When MMIDs are in use we also make use of the GINVT instruction, which is available due to the global nature of MMIDs. By executing a sequence of GINVT & SYNC 0x14 instructions we can avoid the overhead of an IPI to each remote CPU in many cases. One complication is that GINVT will invalidate wired entries (in all cases apart from type 0, which targets the entire TLB). In order to avoid GINVT invalidating any wired TLB entries we set up, we make sure to create those entries using a reserved MMID (0) that we never associate with any address space.

Also of note is that KVM will require further work in order to support MMIDs & GINVT, since KVM is involved in allocating IDs for guests & in configuring the MMU. That work is not part of this patch, so for now when MMIDs are in use KVM is disabled.

Signed-off-by: Paul Burton <paul.burton@mips.com>
Cc: linux-mips@vger.kernel.org
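To make the allocation scheme described above easier to picture, here is a minimal, self-contained C sketch of a versioned ID allocator in the same spirit. It is illustrative only: every name in it is invented for this example, and it deliberately omits the locking, 64 bit atomics, per-CPU handling and preservation of in-use IDs across a rollover that the real arch/mips/mm/context.c code needs.

/*
 * Illustrative sketch only: a simplified, single-threaded version of the
 * versioned bitmap allocator described above.  All names are made up for
 * this example; the real kernel code also preserves in-use IDs across a
 * rollover and relies on efficient 64 bit atomics.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define ID_BITS		16			/* cap IDs at 16 bits, as the patch does */
#define NUM_IDS		(1u << ID_BITS)
#define VERSION_INC	((uint64_t)1 << ID_BITS)	/* version lives above the ID bits */

static uint64_t current_version = VERSION_INC;	/* generation counter */
static unsigned long bitmap[NUM_IDS / (8 * sizeof(unsigned long))];

/* Mark @id as used; return whether it was already used. */
static bool test_and_set_id(unsigned int id)
{
	unsigned long mask = 1ul << (id % (8 * sizeof(unsigned long)));
	unsigned long *word = &bitmap[id / (8 * sizeof(unsigned long))];
	bool was_set = *word & mask;

	*word |= mask;
	return was_set;
}

/*
 * Return a valid (version | id) cookie for the address space whose stored
 * cookie is *ctx_id, allocating a fresh id if the stored cookie belongs to
 * an older version.  ID 0 is reserved, mirroring the reserved MMID used
 * for wired TLB entries so that per-ID invalidates never touch them.
 */
static uint64_t get_id(uint64_t *ctx_id)
{
	unsigned int n;

	/* Fast path: cookie is from the current generation, keep using it. */
	if ((*ctx_id & ~(uint64_t)(VERSION_INC - 1)) == current_version)
		return *ctx_id;

	/* Slow path: find a free id, skipping the reserved id 0. */
	for (n = 1; n < NUM_IDS; n++) {
		if (!test_and_set_id(n)) {
			*ctx_id = current_version | n;
			return *ctx_id;
		}
	}

	/* Rollover: start a new generation with an empty bitmap and retry. */
	current_version += VERSION_INC;
	memset(bitmap, 0, sizeof(bitmap));
	test_and_set_id(0);		/* keep id 0 reserved */
	return get_id(ctx_id);
}

The 16 bit cap mentioned above also keeps such a bitmap small: 2^16 IDs need 8 KiB of bitmap, whereas tracking a full 32 bit ID space would take 512 MiB.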
Diffstat (limited to 'arch/mips/lib')
-rw-r--r--	arch/mips/lib/dump_tlb.c	22
1 file changed, 17 insertions(+), 5 deletions(-)
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 781ad96b78c4..83ed37298e66 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -10,6 +10,7 @@
#include <asm/hazards.h>
#include <asm/mipsregs.h>
+#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tlbdebug.h>
@@ -73,12 +74,13 @@ static inline const char *msk2str(unsigned int mask)
static void dump_tlb(int first, int last)
{
- unsigned long s_entryhi, entryhi, asid;
+ unsigned long s_entryhi, entryhi, asid, mmid;
unsigned long long entrylo0, entrylo1, pa;
unsigned int s_index, s_pagemask, s_guestctl1 = 0;
unsigned int pagemask, guestctl1 = 0, c0, c1, i;
unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
+ unsigned long uninitialized_var(s_mmid);
#ifdef CONFIG_32BIT
bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
int pwidth = xpa ? 11 : 8;
@@ -92,7 +94,12 @@ static void dump_tlb(int first, int last)
s_pagemask = read_c0_pagemask();
s_entryhi = read_c0_entryhi();
s_index = read_c0_index();
- asid = s_entryhi & asidmask;
+
+ if (cpu_has_mmid)
+ asid = s_mmid = read_c0_memorymapid();
+ else
+ asid = s_entryhi & asidmask;
+
if (cpu_has_guestid)
s_guestctl1 = read_c0_guestctl1();
@@ -105,6 +112,12 @@ static void dump_tlb(int first, int last)
entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
entrylo1 = read_c0_entrylo1();
+
+ if (cpu_has_mmid)
+ mmid = read_c0_memorymapid();
+ else
+ mmid = entryhi & asidmask;
+
if (cpu_has_guestid)
guestctl1 = read_c0_guestctl1();
@@ -124,8 +137,7 @@ static void dump_tlb(int first, int last)
* leave only a single G bit set after a machine check exception
* due to duplicate TLB entry.
*/
- if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
- (entryhi & asidmask) != asid)
+ if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
continue;
/*
@@ -138,7 +150,7 @@ static void dump_tlb(int first, int last)
pr_cont("va=%0*lx asid=%0*lx",
vwidth, (entryhi & ~0x1fffUL),
- asidwidth, entryhi & asidmask);
+ asidwidth, mmid);
if (cpu_has_guestid)
pr_cont(" gid=%02lx",
(guestctl1 & MIPS_GCTL1_RID)