diff options
author | Michael Ellerman <mpe@ellerman.id.au> | 2022-05-19 15:10:42 +0200 |
---|---|---|
committer | Michael Ellerman <mpe@ellerman.id.au> | 2022-05-19 15:10:42 +0200 |
commit | b104e41cda1ef9c5e851a7de3f30b53535e7d528 (patch) | |
tree | 7bab688d125e67e42d387b97c45522c76155bbe2 /arch/powerpc/kvm/book3s_hv_p9_entry.c | |
parent | Merge branch 'fixes' into next (diff) | |
parent | KVM: PPC: Book3S HV: Fix vcore_blocked tracepoint (diff) | |
download | linux-b104e41cda1ef9c5e851a7de3f30b53535e7d528.tar.xz linux-b104e41cda1ef9c5e851a7de3f30b53535e7d528.zip |
Merge branch 'topic/ppc-kvm' into next
Merge our KVM topic branch.
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_p9_entry.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv_p9_entry.c | 15 |
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c index ac38c1cad378..112a09b33328 100644 --- a/arch/powerpc/kvm/book3s_hv_p9_entry.c +++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c @@ -539,8 +539,10 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 { struct kvm_nested_guest *nested = vcpu->arch.nested; u32 lpid; + u32 pid; lpid = nested ? nested->shadow_lpid : kvm->arch.lpid; + pid = vcpu->arch.pid; /* * Prior memory accesses to host PID Q3 must be completed before we @@ -551,7 +553,7 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 isync(); mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPCR, lpcr); - mtspr(SPRN_PID, vcpu->arch.pid); + mtspr(SPRN_PID, pid); /* * isync not required here because we are HRFID'ing to guest before * any guest context access, which is context synchronising. @@ -561,9 +563,11 @@ static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u6 static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr) { u32 lpid; + u32 pid; int i; lpid = kvm->arch.lpid; + pid = vcpu->arch.pid; /* * See switch_mmu_to_guest_radix. 
ptesync should not be required here @@ -574,7 +578,7 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 isync(); mtspr(SPRN_LPID, lpid); mtspr(SPRN_LPCR, lpcr); - mtspr(SPRN_PID, vcpu->arch.pid); + mtspr(SPRN_PID, pid); for (i = 0; i < vcpu->arch.slb_max; i++) mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv); @@ -585,6 +589,9 @@ static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 static void switch_mmu_to_host(struct kvm *kvm, u32 pid) { + u32 lpid = kvm->arch.host_lpid; + u64 lpcr = kvm->arch.host_lpcr; + /* * The guest has exited, so guest MMU context is no longer being * non-speculatively accessed, but a hwsync is needed before the @@ -594,8 +601,8 @@ static void switch_mmu_to_host(struct kvm *kvm, u32 pid) asm volatile("hwsync" ::: "memory"); isync(); mtspr(SPRN_PID, pid); - mtspr(SPRN_LPID, kvm->arch.host_lpid); - mtspr(SPRN_LPCR, kvm->arch.host_lpcr); + mtspr(SPRN_LPID, lpid); + mtspr(SPRN_LPCR, lpcr); /* * isync is not required after the switch, because mtmsrd with L=0 * is performed after this switch, which is context synchronising. |