author | Nicholas Piggin <npiggin@gmail.com> | 2021-11-23 10:52:09 +0100
committer | Michael Ellerman <mpe@ellerman.id.au> | 2021-11-24 11:09:00 +0100
commit | d5f480194577423731ee8413791a5486f26a95ab (patch)
tree | be2385b08f62b1a05b8c795312878aeb429a1526 /arch/powerpc/kvm/book3s_hv_p9_entry.c
parent | KVM: PPC: Book3S HV P9: Move nested guest entry into its own function (diff)
download | linux-d5f480194577423731ee8413791a5486f26a95ab.tar.xz linux-d5f480194577423731ee8413791a5486f26a95ab.zip
KVM: PPC: Book3S HV P9: Move remaining SPR and MSR access into low level entry
Move register saving and loading from kvmhv_p9_guest_entry() into the HV
and nested entry handlers.
Accesses are scheduled to reduce mtSPR/mfSPR interleaving, which reduces SPR scoreboard stalls.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20211123095231.1036501-32-npiggin@gmail.com
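
To illustrate the scheduling rationale in the message above, here is a minimal sketch. It is not taken from the patch: `struct host_regs` and both function names are hypothetical, while `mfspr()`/`mtspr()` and the `SPRN_*` constants are the kernel's usual SPR accessors from `asm/reg.h`. Batching all the reads ahead of all the writes avoids back-to-back mfSPR/mtSPR dependencies on the SPR scoreboard:

```c
#include <asm/reg.h>	/* mfspr/mtspr and SPRN_* definitions */

struct host_regs {	/* hypothetical container for saved host SPRs */
	unsigned long hfscr, ciabr;
};

/*
 * Interleaved: every read is immediately followed by a write to the
 * same SPR, so the scoreboard must serialize each pair.
 */
static void switch_sprs_interleaved(struct host_regs *host,
				    const struct host_regs *guest)
{
	host->hfscr = mfspr(SPRN_HFSCR);
	mtspr(SPRN_HFSCR, guest->hfscr);
	host->ciabr = mfspr(SPRN_CIABR);
	mtspr(SPRN_CIABR, guest->ciabr);
}

/*
 * Batched: all reads issue first, then all writes, so independent
 * SPR accesses can overlap instead of stalling one another.
 */
static void switch_sprs_batched(struct host_regs *host,
				const struct host_regs *guest)
{
	host->hfscr = mfspr(SPRN_HFSCR);
	host->ciabr = mfspr(SPRN_CIABR);

	mtspr(SPRN_HFSCR, guest->hfscr);
	mtspr(SPRN_CIABR, guest->ciabr);
}
```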
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_p9_entry.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_p9_entry.c | 96
1 file changed, 73 insertions, 23 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 784ff5429ebc..fa080533bd8d 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -538,6 +538,7 @@ static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
 int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr,
			 u64 *tb)
 {
+	struct p9_host_os_sprs host_os_sprs;
 	struct kvm *kvm = vcpu->kvm;
 	struct kvm_nested_guest *nested = vcpu->arch.nested;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
@@ -567,9 +568,6 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 
 	vcpu->arch.ceded = 0;
 
-	/* Could avoid mfmsr by passing around, but probably no big deal */
-	msr = mfmsr();
-
 	host_hfscr = mfspr(SPRN_HFSCR);
 	host_ciabr = mfspr(SPRN_CIABR);
 	host_dawr0 = mfspr(SPRN_DAWR0);
@@ -584,6 +582,41 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
 	local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);
 
+	switch_pmu_to_guest(vcpu, &host_os_sprs);
+
+	save_p9_host_os_sprs(&host_os_sprs);
+
+	/*
+	 * This could be combined with MSR[RI] clearing, but that expands
+	 * the unrecoverable window. It would be better to cover unrecoverable
+	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
+	 *
+	 * Much more difficult and less worthwhile to combine with IR/DR
+	 * disable.
+	 */
+	hard_irq_disable();
+	if (lazy_irq_pending()) {
+		trap = 0;
+		goto out;
+	}
+
+	/* MSR bits may have been cleared by context switch */
+	msr = 0;
+	if (IS_ENABLED(CONFIG_PPC_FPU))
+		msr |= MSR_FP;
+	if (cpu_has_feature(CPU_FTR_ALTIVEC))
+		msr |= MSR_VEC;
+	if (cpu_has_feature(CPU_FTR_VSX))
+		msr |= MSR_VSX;
+	if (cpu_has_feature(CPU_FTR_TM) ||
+	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
+		msr |= MSR_TM;
+	msr = msr_check_and_set(msr);
+	/* Save MSR for restore. This is after hard disable, so EE is clear. */
+
+	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
+		msr = mfmsr(); /* MSR may have been updated */
+
 	if (vc->tb_offset) {
 		u64 new_tb = *tb + vc->tb_offset;
 		mtspr(SPRN_TBU40, new_tb);
@@ -642,6 +675,14 @@ int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpc
 	mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
 	mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
 
+	/*
+	 * It might be preferable to load_vcpu_state here, in order to get the
+	 * GPR/FP register loads executing in parallel with the previous mtSPR
+	 * instructions, but for now that can't be done because the TM handling
+	 * in load_vcpu_state can change some SPRs and vcpu state (nip, msr).
+	 * But TM could be split out if this would be a significant benefit.
+	 */
+
 	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_HV_P9;
 
 	/*
@@ -819,6 +860,20 @@ tm_return_to_guest:
 	vc->dpdes = mfspr(SPRN_DPDES);
 	vc->vtb = mfspr(SPRN_VTB);
 
+	save_clear_guest_mmu(kvm, vcpu);
+	switch_mmu_to_host(kvm, host_pidr);
+
+	/*
+	 * If we are in real mode, only switch MMU on after the MMU is
+	 * switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
+	 */
+	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
+	    vcpu->arch.shregs.msr & MSR_TS_MASK)
+		msr |= MSR_TS_S;
+	__mtmsrd(msr, 0);
+
+	store_vcpu_state(vcpu);
+
 	dec = mfspr(SPRN_DEC);
 	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
 		dec = (s32) dec;
@@ -851,6 +906,19 @@ tm_return_to_guest:
 		mtspr(SPRN_DAWRX1, host_dawrx1);
 	}
 
+	mtspr(SPRN_DPDES, 0);
+	if (vc->pcr)
+		mtspr(SPRN_PCR, PCR_MASK);
+
+	/* HDEC must be at least as large as DEC, so decrementer_max fits */
+	mtspr(SPRN_HDEC, decrementer_max);
+
+	timer_rearm_host_dec(*tb);
+
+	restore_p9_host_os_sprs(vcpu, &host_os_sprs);
+
+	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
+
 	if (kvm_is_radix(kvm)) {
 		/*
 		 * Since this is radix, do a eieio; tlbsync; ptesync sequence
@@ -867,26 +935,8 @@ tm_return_to_guest:
 	if (cpu_has_feature(CPU_FTR_ARCH_31))
 		asm volatile(PPC_CP_ABORT);
 
-	mtspr(SPRN_DPDES, 0);
-	if (vc->pcr)
-		mtspr(SPRN_PCR, PCR_MASK);
-
-	/* HDEC must be at least as large as DEC, so decrementer_max fits */
-	mtspr(SPRN_HDEC, decrementer_max);
-
-	save_clear_guest_mmu(kvm, vcpu);
-	switch_mmu_to_host(kvm, host_pidr);
-	local_paca->kvm_hstate.in_guest = KVM_GUEST_MODE_NONE;
-
-	/*
-	 * If we are in real mode, only switch MMU on after the MMU is
-	 * switched to host, to avoid the P9_RADIX_PREFETCH_BUG.
-	 */
-	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
-	    vcpu->arch.shregs.msr & MSR_TS_MASK)
-		msr |= MSR_TS_S;
-
-	__mtmsrd(msr, 0);
+out:
+	switch_pmu_to_host(vcpu, &host_os_sprs);
 
 	end_timing(vcpu);
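
Reading the reworked exit path as a whole, condensed from the hunks above (the helper names are exactly those in the diff; surrounding code is elided), the ordering constraint is: guest MMU state is saved and the host MMU context restored while translation is still off, and only then is the MSR rewritten to turn IR/DR back on, which is what avoids the P9_RADIX_PREFETCH_BUG:

```c
	/* condensed exit ordering after this patch */
	save_clear_guest_mmu(kvm, vcpu);	/* save + clear guest LPID/PID     */
	switch_mmu_to_host(kvm, host_pidr);	/* host MMU context back first     */

	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
	    vcpu->arch.shregs.msr & MSR_TS_MASK)
		msr |= MSR_TS_S;		/* keep TM in suspended state      */
	__mtmsrd(msr, 0);			/* only now turn the MMU back on   */

	store_vcpu_state(vcpu);			/* FP/VEC/TM state, MMU is safe    */
	/* ... DEC read, host SPR and timer restore elided ... */
out:
	switch_pmu_to_host(vcpu, &host_os_sprs);/* pairs with switch_pmu_to_guest() */
```

Note the entry/exit pairing this creates: switch_pmu_to_guest(), save_p9_host_os_sprs(), and load_vcpu_state() on the way in correspond to switch_pmu_to_host(), restore_p9_host_os_sprs(), and store_vcpu_state() on the way out, and a lazy interrupt found pending after hard_irq_disable() jumps straight to out:, so only the PMU switch-back and end_timing() run.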