Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/kvm/book3s_64_entry.S     | 11
-rw-r--r--   arch/powerpc/kvm/book3s_64_mmu_radix.c |  4
-rw-r--r--   arch/powerpc/kvm/book3s_hv_p9_entry.c  | 40
3 files changed, 42 insertions, 13 deletions
diff --git a/arch/powerpc/kvm/book3s_64_entry.S b/arch/powerpc/kvm/book3s_64_entry.S
index 983b8c18bc31..05e003eb5d90 100644
--- a/arch/powerpc/kvm/book3s_64_entry.S
+++ b/arch/powerpc/kvm/book3s_64_entry.S
@@ -374,11 +374,16 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
 BEGIN_FTR_SECTION
 	mtspr	SPRN_DAWRX1,r10
 END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
-	mtspr	SPRN_PID,r10
 
 	/*
-	 * Switch to host MMU mode
+	 * Switch to host MMU mode (don't have the real host PID but we aren't
+	 * going back to userspace).
 	 */
+	hwsync
+	isync
+
+	mtspr	SPRN_PID,r10
+
 	ld	r10, HSTATE_KVM_VCPU(r13)
 	ld	r10, VCPU_KVM(r10)
 	lwz	r10, KVM_HOST_LPID(r10)
@@ -389,6 +394,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
 	ld	r10, KVM_HOST_LPCR(r10)
 	mtspr	SPRN_LPCR,r10
 
+	isync
+
 	/*
 	 * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
 	 * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 16359525a40f..8cebe5542256 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -57,6 +57,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 
 	preempt_disable();
 
+	asm volatile("hwsync" ::: "memory");
+	isync();
 	/* switch the lpid first to avoid running host with unallocated pid */
 	old_lpid = mfspr(SPRN_LPID);
 	if (old_lpid != lpid)
@@ -75,6 +77,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 		ret = __copy_to_user_inatomic((void __user *)to, from, n);
 	pagefault_enable();
 
+	asm volatile("hwsync" ::: "memory");
+	isync();
 	/* switch the pid first to avoid running host with unallocated pid */
 	if (quadrant == 1 && pid != old_pid)
 		mtspr(SPRN_PID, old_pid);
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index 093ac0453d91..323b692bbfe2 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -531,17 +531,19 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6
 	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
 
 	/*
-	 * All the isync()s are overkill but trivially follow the ISA
-	 * requirements. Some can likely be replaced with justification
-	 * comment for why they are not needed.
+	 * Prior memory accesses to host PID Q3 must be completed before we
+	 * start switching, and stores must be drained to avoid not-my-LPAR
+	 * logic (see switch_mmu_to_host).
 	 */
+	asm volatile("hwsync" ::: "memory");
 	isync();
 	mtspr(SPRN_LPID, lpid);
-	isync();
 	mtspr(SPRN_LPCR, lpcr);
-	isync();
 	mtspr(SPRN_PID, vcpu->arch.pid);
-	isync();
+	/*
+	 * isync not required here because we are HRFID'ing to guest before
+	 * any guest context access, which is context synchronising.
+	 */
 }
 
 static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
@@ -551,25 +553,41 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64
 
 	lpid = kvm->arch.lpid;
 
+	/*
+	 * See switch_mmu_to_guest_radix. ptesync should not be required here
+	 * even if the host is in HPT mode because speculative accesses would
+	 * not cause RC updates (we are in real mode).
+	 */
+	asm volatile("hwsync" ::: "memory");
+	isync();
 	mtspr(SPRN_LPID, lpid);
 	mtspr(SPRN_LPCR, lpcr);
 	mtspr(SPRN_PID, vcpu->arch.pid);
 
 	for (i = 0; i < vcpu->arch.slb_max; i++)
 		mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
-
-	isync();
+	/*
+	 * isync not required here, see switch_mmu_to_guest_radix.
+	 */
 }
 
 static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
 {
+	/*
+	 * The guest has exited, so guest MMU context is no longer being
+	 * non-speculatively accessed, but a hwsync is needed before the
+	 * mtLPIDR / mtPIDR switch, in order to ensure all stores are drained,
+	 * so the not-my-LPAR tlbie logic does not overlook them.
+	 */
+	asm volatile("hwsync" ::: "memory");
 	isync();
 	mtspr(SPRN_PID, pid);
-	isync();
 	mtspr(SPRN_LPID, kvm->arch.host_lpid);
-	isync();
 	mtspr(SPRN_LPCR, kvm->arch.host_lpcr);
-	isync();
+	/*
+	 * isync is not required after the switch, because mtmsrd with L=0
+	 * is performed after this switch, which is context synchronising.
+	 */
 
 	if (!radix_enabled())
 		slb_restore_bolted_realmode();
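
For context, the ordering pattern this patch settles on can be sketched in plain C. This is an illustrative sketch only, not code from the patch: the helper name is hypothetical, and it assumes the usual arch/powerpc mtspr()/isync() accessors from asm/reg.h and asm/synch.h. One hwsync drains prior stores before the LPID/PID switch, an isync completes outstanding accesses in the old context, and the per-mtspr isyncs are dropped because a context-synchronising instruction (hrfid into the guest, or mtmsrd on the host exit path) runs before any access in the new context.

	/* Hypothetical helper, for illustration only; not part of the patch. */
	static inline void example_mmu_switch(unsigned long lpid, unsigned long pid)
	{
		/* Drain stores so the not-my-LPAR tlbie logic cannot miss them. */
		asm volatile("hwsync" ::: "memory");
		/* Complete outstanding accesses in the old translation context. */
		isync();
		mtspr(SPRN_LPID, lpid);
		mtspr(SPRN_PID, pid);
		/*
		 * No trailing isync: the caller is expected to execute a
		 * context-synchronising instruction (hrfid or mtmsrd) before
		 * any access in the new context.
		 */
	}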