path: root/arch/powerpc
author     Scott Wood <scottwood@freescale.com>  2014-03-10 23:29:38 +0100
committer  Scott Wood <scottwood@freescale.com>  2014-03-20 01:57:15 +0100
commit     a3dc620743f44cd509a1ab7b01c33d26fb501c8c (patch)
tree       bee3b37742185aa7c43f50fefa7c59928e937a03 /arch/powerpc
parent     powerpc/booke64: Use SPRG7 for VDSO (diff)
download   linux-a3dc620743f44cd509a1ab7b01c33d26fb501c8c.tar.xz
           linux-a3dc620743f44cd509a1ab7b01c33d26fb501c8c.zip
powerpc/booke64: Use SPRG_TLB_EXFRAME on bolted handlers
While bolted handlers (including e6500) do not need to deal with a TLB
miss recursively causing another TLB miss, nested TLB misses can still
happen with crit/mc/debug exceptions -- so we still need to honor
SPRG_TLB_EXFRAME.

We don't need to spend time modifying it in the TLB miss fastpath,
though -- the special level exception will handle that.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Cc: Mihai Caraman <mihai.caraman@freescale.com>
Cc: kvm-ppc@vger.kernel.org
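For readers unfamiliar with the exception-frame scheme the commit message refers to, the following is a minimal C sketch of the idea only -- it is not kernel code, and every name in it (exframe, frames, tlb_exframe, special_level_enter, ...) is hypothetical. SPRG_TLB_EXFRAME is modelled as a plain pointer: the TLB miss fastpath only dereferences it, while crit/mc/debug entry/exit advance and retreat it, so a TLB miss nested under one of those exceptions saves into a fresh frame.

	/* Illustrative only -- hypothetical names, not kernel code. */
	struct exframe {
		unsigned long r10, r11, r13, r14, r15, r16, cr, lr;
	};

	static struct exframe frames[4];                 /* one frame per nesting level    */
	static struct exframe *tlb_exframe = &frames[0]; /* stands in for SPRG_TLB_EXFRAME */

	/* TLB miss fastpath: use the current frame, never move the pointer. */
	static void tlb_miss(unsigned long r10, unsigned long r13)
	{
		tlb_exframe->r10 = r10;                  /* save what the handler clobbers */
		tlb_exframe->r13 = r13;
		/* ... reload the TLB entry, restore from the same frame, return ... */
	}

	/* crit/mc/debug entry/exit are the only places the pointer moves, so a
	 * TLB miss taken while one of them runs lands in the next frame. */
	static void special_level_enter(void) { tlb_exframe++; }
	static void special_level_exit(void)  { tlb_exframe--; }

This is why the fastpath can skip updating the pointer: only the special level exceptions can introduce nesting, and they already maintain it on entry and exit.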
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/exception-64e.h     | 10
-rw-r--r--  arch/powerpc/include/asm/kvm_booke_hv_asm.h  |  8
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S        | 11
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S                | 44
4 files changed, 42 insertions(+), 31 deletions(-)
diff --git a/arch/powerpc/include/asm/exception-64e.h b/arch/powerpc/include/asm/exception-64e.h
index e73452f09019..a563d9afd179 100644
--- a/arch/powerpc/include/asm/exception-64e.h
+++ b/arch/powerpc/include/asm/exception-64e.h
@@ -172,16 +172,6 @@ exc_##label##_book3e:
ld r9,EX_TLB_R9(r12); \
ld r8,EX_TLB_R8(r12); \
mtlr r16;
-#define TLB_MISS_PROLOG_STATS_BOLTED \
- mflr r10; \
- std r8,PACA_EXTLB+EX_TLB_R8(r13); \
- std r9,PACA_EXTLB+EX_TLB_R9(r13); \
- std r10,PACA_EXTLB+EX_TLB_LR(r13);
-#define TLB_MISS_RESTORE_STATS_BOLTED \
- ld r16,PACA_EXTLB+EX_TLB_LR(r13); \
- ld r9,PACA_EXTLB+EX_TLB_R9(r13); \
- ld r8,PACA_EXTLB+EX_TLB_R8(r13); \
- mtlr r16;
#define TLB_MISS_STATS_D(name) \
addi r9,r13,MMSTAT_DSTATS+name; \
bl .tlb_stat_inc;
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
index c3e3fd53a3a9..e5f048bbcb7c 100644
--- a/arch/powerpc/include/asm/kvm_booke_hv_asm.h
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -45,10 +45,12 @@
*
* Expected inputs (TLB exception type):
* r10 = saved CR
+ * r12 = extlb pointer
* r13 = PACA_POINTER
- * *(r13 + PACA_EX##type + EX_TLB_R10) = saved r10
- * *(r13 + PACA_EX##type + EX_TLB_R11) = saved r11
- * SPRN_SPRG_GEN_SCRATCH = saved r13
+ * *(r12 + EX_TLB_R10) = saved r10
+ * *(r12 + EX_TLB_R11) = saved r11
+ * *(r12 + EX_TLB_R13) = saved r13
+ * SPRN_SPRG_GEN_SCRATCH = saved r12
*
* Only the bolted version of TLB miss exception handlers is supported now.
*/
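To make the revised hand-off above concrete, here is a hedged C rendering of the convention the comment describes -- purely illustrative, with a hypothetical struct standing in for the EX_TLB_* save slots. Under the new scheme the KVM entry path receives the frame pointer itself (r12) and pulls the saved r10/r11/r13 from it, instead of computing PACA_EX##type offsets from r13.

	/* Illustrative only: a hypothetical view of the EX_TLB_* save slots. */
	struct tlb_exframe_slots {
		unsigned long saved_r10;   /* EX_TLB_R10 */
		unsigned long saved_r11;   /* EX_TLB_R11 */
		unsigned long saved_r13;   /* EX_TLB_R13 -- new: r13 now lives in the frame */
	};

	struct guest_regs {
		unsigned long cr, r10, r11, r13;
	};

	/* What the TLB-type entry path receives, per the comment above:
	 * r10 holds the saved CR, r12 points at the frame, r13 is the PACA. */
	static void kvm_tlb_entry(unsigned long r10_cr,
				  const struct tlb_exframe_slots *r12_frame,
				  struct guest_regs *guest)
	{
		guest->cr  = r10_cr;
		guest->r10 = r12_frame->saved_r10;
		guest->r11 = r12_frame->saved_r11;
		guest->r13 = r12_frame->saved_r13;
		/* the saved r12 itself is recovered from SPRN_SPRG_GEN_SCRATCH */
	}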
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 99635a37c78c..a1712b818a5f 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -229,13 +229,20 @@
stw r10, VCPU_CR(r4)
PPC_STL r11, VCPU_GPR(R4)(r4)
PPC_STL r5, VCPU_GPR(R5)(r4)
- mfspr r5, \scratch
PPC_STL r6, VCPU_GPR(R6)(r4)
PPC_STL r8, VCPU_GPR(R8)(r4)
PPC_STL r9, VCPU_GPR(R9)(r4)
- PPC_STL r5, VCPU_GPR(R13)(r4)
+ .if \type == EX_TLB
+ PPC_LL r5, EX_TLB_R13(r12)
+ PPC_LL r6, EX_TLB_R10(r12)
+ PPC_LL r8, EX_TLB_R11(r12)
+ mfspr r12, \scratch
+ .else
+ mfspr r5, \scratch
PPC_LL r6, (\paca_ex + \ex_r10)(r13)
PPC_LL r8, (\paca_ex + \ex_r11)(r13)
+ .endif
+ PPC_STL r5, VCPU_GPR(R13)(r4)
PPC_STL r3, VCPU_GPR(R3)(r4)
PPC_STL r7, VCPU_GPR(R7)(r4)
PPC_STL r12, VCPU_GPR(R12)(r4)
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 1e50249e900b..356e8b41fb09 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -39,37 +39,49 @@
* *
**********************************************************************/
+/*
+ * Note that, unlike non-bolted handlers, TLB_EXFRAME is not
+ * modified by the TLB miss handlers themselves, since the TLB miss
+ * handler code will not itself cause a recursive TLB miss.
+ *
+ * TLB_EXFRAME will be modified when crit/mc/debug exceptions are
+ * entered/exited.
+ */
.macro tlb_prolog_bolted intnum addr
- mtspr SPRN_SPRG_GEN_SCRATCH,r13
+ mtspr SPRN_SPRG_GEN_SCRATCH,r12
+ mfspr r12,SPRN_SPRG_TLB_EXFRAME
+ std r13,EX_TLB_R13(r12)
+ std r10,EX_TLB_R10(r12)
mfspr r13,SPRN_SPRG_PACA
- std r10,PACA_EXTLB+EX_TLB_R10(r13)
+
mfcr r10
- std r11,PACA_EXTLB+EX_TLB_R11(r13)
+ std r11,EX_TLB_R11(r12)
#ifdef CONFIG_KVM_BOOKE_HV
BEGIN_FTR_SECTION
mfspr r11, SPRN_SRR1
END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
#endif
DO_KVM \intnum, SPRN_SRR1
- std r16,PACA_EXTLB+EX_TLB_R16(r13)
+ std r16,EX_TLB_R16(r12)
mfspr r16,\addr /* get faulting address */
- std r14,PACA_EXTLB+EX_TLB_R14(r13)
+ std r14,EX_TLB_R14(r12)
ld r14,PACAPGD(r13)
- std r15,PACA_EXTLB+EX_TLB_R15(r13)
- std r10,PACA_EXTLB+EX_TLB_CR(r13)
- TLB_MISS_PROLOG_STATS_BOLTED
+ std r15,EX_TLB_R15(r12)
+ std r10,EX_TLB_CR(r12)
+ TLB_MISS_PROLOG_STATS
.endm
.macro tlb_epilog_bolted
- ld r14,PACA_EXTLB+EX_TLB_CR(r13)
- ld r10,PACA_EXTLB+EX_TLB_R10(r13)
- ld r11,PACA_EXTLB+EX_TLB_R11(r13)
+ ld r14,EX_TLB_CR(r12)
+ ld r10,EX_TLB_R10(r12)
+ ld r11,EX_TLB_R11(r12)
+ ld r13,EX_TLB_R13(r12)
mtcr r14
- ld r14,PACA_EXTLB+EX_TLB_R14(r13)
- ld r15,PACA_EXTLB+EX_TLB_R15(r13)
- TLB_MISS_RESTORE_STATS_BOLTED
- ld r16,PACA_EXTLB+EX_TLB_R16(r13)
- mfspr r13,SPRN_SPRG_GEN_SCRATCH
+ ld r14,EX_TLB_R14(r12)
+ ld r15,EX_TLB_R15(r12)
+ TLB_MISS_RESTORE_STATS
+ ld r16,EX_TLB_R16(r12)
+ mfspr r12,SPRN_SPRG_GEN_SCRATCH
.endm
/* Data TLB miss */
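As a closing illustration of what the tlb_low_64e.S hunk changes, here is a before/after sketch in C. It is schematic only: the struct layouts and function names are invented, but the addressing shift matches the diff -- saves that used to go to the single PACA_EXTLB area (reached via r13) now go through the frame pointer published in SPRN_SPRG_TLB_EXFRAME (held in r12).

	/* Schematic only -- invented types, real register roles. */
	struct save_area { unsigned long r10, r11, r13, r14, r15, r16, cr; };

	struct paca_stub {                 /* simplified stand-in for the PACA   */
		struct save_area extlb;    /* PACA_EXTLB: the single bolted area */
	};

	/* Before: every bolted TLB miss saved into the one PACA_EXTLB area, so a
	 * TLB miss taken inside a crit/mc/debug handler could overwrite state
	 * saved by an interrupted TLB miss. */
	static void prolog_old(struct paca_stub *r13, unsigned long r10)
	{
		r13->extlb.r10 = r10;
	}

	/* After: saves go through the current frame pointer (r12, loaded from
	 * SPRN_SPRG_TLB_EXFRAME), which crit/mc/debug entry already advanced. */
	static void prolog_new(struct save_area *r12, unsigned long r10)
	{
		r12->r10 = r10;
	}

The epilog mirrors this: it restores everything from the frame addressed by r12 and recovers r12 itself from SPRN_SPRG_GEN_SCRATCH last.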