author      Michael Ellerman <mpe@ellerman.id.au>  2019-04-30 06:28:17 +0200
committer   Michael Ellerman <mpe@ellerman.id.au>  2019-04-30 14:38:56 +0200
commit      e9cef0189c5b217fcd4788562862defc27632a01 (patch)
tree        bb3563fd9df63a7a6975b342d5947e3192dc2ab8 /arch/powerpc/platforms
parent      powerpc/64s: Reimplement book3s idle code in C (diff)
powerpc/powernv/idle: Restore AMR/UAMOR/AMOR/IAMR after idle
This is an implementation of commits 53a712bae5dd ("powerpc/powernv/idle: Restore AMR/UAMOR/AMOR after idle") and a3f3072db6ca ("powerpc/powernv/idle: Restore IAMR after idle") using the new C-based idle code.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Extract from Nick's patch]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--   arch/powerpc/platforms/powernv/idle.c   52
1 file changed, 46 insertions(+), 6 deletions(-)
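
The change in the diff below moves AMOR out of the per-subcore save area and instead saves the four per-thread registers (AMR, IAMR, AMOR, UAMOR) before entering the idle instruction, restoring them on wakeup when SRR1 indicates state may have been lost. A minimal sketch of that save/restore pattern is shown here for illustration; the struct and helper names are hypothetical, only mfspr()/mtspr() and the SPRN_* constants come from the kernel, and the real patch keeps these fields inside the existing p7_sprs/p9_sprs structures.

#include <asm/reg.h>	/* mfspr/mtspr and SPRN_* definitions */

/* Hypothetical container for the per-thread SPRs lost in shallow states */
struct idle_amr_sprs {
	u64 amr;
	u64 iamr;
	u64 amor;
	u64 uamor;
};

static void save_amr_sprs(struct idle_amr_sprs *s)
{
	/* Snapshot the registers before the thread goes idle */
	s->amr   = mfspr(SPRN_AMR);
	s->iamr  = mfspr(SPRN_IAMR);
	s->amor  = mfspr(SPRN_AMOR);
	s->uamor = mfspr(SPRN_UAMOR);
}

static void restore_amr_sprs(const struct idle_amr_sprs *s)
{
	/*
	 * As the patch notes, no isync is needed after these mtsprs when a
	 * context-synchronizing mtmsrd follows on the wakeup path.
	 */
	mtspr(SPRN_AMR, s->amr);
	mtspr(SPRN_IAMR, s->iamr);
	mtspr(SPRN_AMOR, s->amor);
	mtspr(SPRN_UAMOR, s->uamor);
}

In the actual patch the restore on the POWER7/8 path is additionally guarded by a check that (srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS, so the registers are only rewritten when the wake state reports a possible loss.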
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 87f5f4ae60ca..c9133f7908ca 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -296,7 +296,6 @@ struct p7_sprs {
/* per subcore */
u64 sdr1;
u64 rpr;
- u64 amor;
/* per thread */
u64 lpcr;
@@ -306,6 +305,12 @@ struct p7_sprs {
u64 spurr;
u64 dscr;
u64 wort;
+
+ /* per thread SPRs that get lost in shallow states */
+ u64 amr;
+ u64 iamr;
+ u64 amor;
+ u64 uamor;
};
static unsigned long power7_idle_insn(unsigned long type)
@@ -342,7 +347,6 @@ static unsigned long power7_idle_insn(unsigned long type)
sprs.sdr1 = mfspr(SPRN_SDR1);
sprs.rpr = mfspr(SPRN_RPR);
- sprs.amor = mfspr(SPRN_AMOR);
sprs.lpcr = mfspr(SPRN_LPCR);
if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
@@ -374,6 +378,13 @@ static unsigned long power7_idle_insn(unsigned long type)
atomic_unlock_thread_idle();
}
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ sprs.amr = mfspr(SPRN_AMR);
+ sprs.iamr = mfspr(SPRN_IAMR);
+ sprs.amor = mfspr(SPRN_AMOR);
+ sprs.uamor = mfspr(SPRN_UAMOR);
+ }
+
local_paca->thread_idle_state = type;
srr1 = isa206_idle_insn_mayloss(type); /* go idle */
local_paca->thread_idle_state = PNV_THREAD_RUNNING;
@@ -381,6 +392,19 @@ static unsigned long power7_idle_insn(unsigned long type)
WARN_ON_ONCE(!srr1);
WARN_ON_ONCE(mfmsr() & (MSR_IR|MSR_DR));
+ if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
+ if ((srr1 & SRR1_WAKESTATE) != SRR1_WS_NOLOSS) {
+ /*
+ * We don't need an isync after the mtsprs here because
+ * the upcoming mtmsrd is execution synchronizing.
+ */
+ mtspr(SPRN_AMR, sprs.amr);
+ mtspr(SPRN_IAMR, sprs.iamr);
+ mtspr(SPRN_AMOR, sprs.amor);
+ mtspr(SPRN_UAMOR, sprs.uamor);
+ }
+ }
+
if (unlikely((srr1 & SRR1_WAKEMASK_P8) == SRR1_WAKEHMI))
hmi_exception_realmode(NULL);
@@ -444,7 +468,6 @@ core_woken:
/* Per-subcore SPRs */
mtspr(SPRN_SDR1, sprs.sdr1);
mtspr(SPRN_RPR, sprs.rpr);
- mtspr(SPRN_AMOR, sprs.amor);
subcore_woken:
/*
@@ -560,7 +583,6 @@ struct p9_sprs {
u64 rpr;
u64 tscr;
u64 ldbar;
- u64 amor;
/* per thread */
u64 lpcr;
@@ -576,6 +598,12 @@ struct p9_sprs {
u32 mmcr0;
u32 mmcr1;
u64 mmcr2;
+
+ /* per thread SPRs that get lost in shallow states */
+ u64 amr;
+ u64 iamr;
+ u64 amor;
+ u64 uamor;
};
static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
@@ -652,13 +680,17 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
sprs.rpr = mfspr(SPRN_RPR);
sprs.tscr = mfspr(SPRN_TSCR);
sprs.ldbar = mfspr(SPRN_LDBAR);
- sprs.amor = mfspr(SPRN_AMOR);
sprs_saved = true;
atomic_start_thread_idle();
}
+ sprs.amr = mfspr(SPRN_AMR);
+ sprs.iamr = mfspr(SPRN_IAMR);
+ sprs.amor = mfspr(SPRN_AMOR);
+ sprs.uamor = mfspr(SPRN_UAMOR);
+
srr1 = isa300_idle_stop_mayloss(psscr); /* go idle */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -674,6 +706,15 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
unsigned long mmcra;
/*
+ * We don't need an isync after the mtsprs here because the
+ * upcoming mtmsrd is execution synchronizing.
+ */
+ mtspr(SPRN_AMR, sprs.amr);
+ mtspr(SPRN_IAMR, sprs.iamr);
+ mtspr(SPRN_AMOR, sprs.amor);
+ mtspr(SPRN_UAMOR, sprs.uamor);
+
+ /*
* Workaround for POWER9 DD2.0, if we lost resources, the ERAT
* might have been corrupted and needs flushing. We also need
* to reload MMCR0 (see mmcr0 comment above).
@@ -722,7 +763,6 @@ static unsigned long power9_idle_stop(unsigned long psscr, bool mmu_on)
mtspr(SPRN_RPR, sprs.rpr);
mtspr(SPRN_TSCR, sprs.tscr);
mtspr(SPRN_LDBAR, sprs.ldbar);
- mtspr(SPRN_AMOR, sprs.amor);
if (pls >= pnv_first_tb_loss_level) {
/* TB loss */