author		Paul Mackerras <paulus@ozlabs.org>	2019-02-04 09:06:18 +0100
committer	Paul Mackerras <paulus@ozlabs.org>	2019-02-19 04:25:52 +0100
commit		41a8645ab1c3c37f96955fec3360e123dc06abcd (patch)
tree		7030f6d35acb66059c8096be7447fa7404579566 /arch/powerpc
parent		Linux 5.0-rc6 (diff)
KVM: PPC: Book3S PR: Add emulation for slbfee. instruction
Recent kernels, since commit e15a4fea4dee ("powerpc/64s/hash: Add some SLB debugging tests", 2018-10-03), use the slbfee. instruction, which PR KVM currently does not have code to emulate. Consequently, recent kernels fail to boot under PR KVM. This adds emulation of slbfee., enabling these kernels to boot successfully.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
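For context, slbfee. RT,RB searches the SLB for an entry whose ESID matches the effective address in RB: on a hit, RT receives the matching entry's V (VSID-side) doubleword and CR0.EQ is set; on a miss, RT is set to 0 and CR0.EQ is cleared. The SLB debugging test referenced above probes segments roughly as follows (an illustrative sketch, not the kernel's exact code; the helper name is made up, and the kernel emits the opcode through a macro rather than relying on assembler support for the mnemonic):

/*
 * Illustrative sketch of the SLB presence check that recent kernels
 * perform with slbfee. -- names and details are approximate.
 */
static void check_slb_presence(bool expected, unsigned long ea)
{
	unsigned long entry_v;

	/* only the ESID takes part in the lookup; clear the offset bits */
	ea &= ~((1UL << SID_SHIFT) - 1);

	/* RT <- V word of the SLB entry covering EA, or 0 if there is none */
	asm volatile("slbfee. %0, %1" : "=r" (entry_v) : "r" (ea) : "cr0");

	/* a zero result means no SLB entry covers this address */
	WARN_ON(expected == (entry_v == 0));
}

Because PR KVM runs the guest kernel in problem state, slbfee. traps to the host and must be emulated there; that is the path the hunks below add.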
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/kvm_host.h	1
-rw-r--r--	arch/powerpc/kvm/book3s_32_mmu.c	1
-rw-r--r--	arch/powerpc/kvm/book3s_64_mmu.c	14
-rw-r--r--	arch/powerpc/kvm/book3s_emulate.c	18
4 files changed, 34 insertions, 0 deletions
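The new ->slbfee() MMU hook has a simple contract, visible in the book3s_64_mmu.c hunk below: return 0 and store the matching SLB entry's V word through ret_slb when an entry covers eaddr, otherwise return -ENOENT with *ret_slb zeroed; Book3S 32 leaves the hook NULL because it has no SLB. A hypothetical caller (illustrative only, not part of the patch) would use it like this:

/* Hypothetical helper: true if the guest SLB has an entry covering eaddr. */
static bool guest_slb_lookup(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *slbv)
{
	/* Book3S 32 MMUs leave the hook NULL -- there is no SLB to search */
	if (!vcpu->arch.mmu.slbfee)
		return false;

	/* 0 means a match was found and *slbv holds the entry's V word */
	return vcpu->arch.mmu.slbfee(vcpu, eaddr, slbv) == 0;
}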
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 0f98f00da2ea..091430339db1 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -377,6 +377,7 @@ struct kvmppc_mmu {
 	void (*slbmte)(struct kvm_vcpu *vcpu, u64 rb, u64 rs);
 	u64 (*slbmfee)(struct kvm_vcpu *vcpu, u64 slb_nr);
 	u64 (*slbmfev)(struct kvm_vcpu *vcpu, u64 slb_nr);
+	int (*slbfee)(struct kvm_vcpu *vcpu, gva_t eaddr, ulong *ret_slb);
 	void (*slbie)(struct kvm_vcpu *vcpu, u64 slb_nr);
 	void (*slbia)(struct kvm_vcpu *vcpu);
 	/* book3s */
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 612169988a3d..6f789f674048 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -425,6 +425,7 @@ void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu)
 	mmu->slbmte = NULL;
 	mmu->slbmfee = NULL;
 	mmu->slbmfev = NULL;
+	mmu->slbfee = NULL;
 	mmu->slbie = NULL;
 	mmu->slbia = NULL;
 }
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index c92dd25bed23..d4b967f0e8d4 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -435,6 +435,19 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
 	kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT);
 }
 
+static int kvmppc_mmu_book3s_64_slbfee(struct kvm_vcpu *vcpu, gva_t eaddr,
+				       ulong *ret_slb)
+{
+	struct kvmppc_slb *slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr);
+
+	if (slbe) {
+		*ret_slb = slbe->origv;
+		return 0;
+	}
+	*ret_slb = 0;
+	return -ENOENT;
+}
+
 static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr)
 {
 	struct kvmppc_slb *slbe;
@@ -670,6 +683,7 @@ void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
 	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
 	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
 	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
+	mmu->slbfee = kvmppc_mmu_book3s_64_slbfee;
 	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
 	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
 	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 8c7e933e942e..6ef7c5f00a49 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -47,6 +47,7 @@
 #define OP_31_XOP_SLBMFEV	851
 #define OP_31_XOP_EIOIO		854
 #define OP_31_XOP_SLBMFEE	915
+#define OP_31_XOP_SLBFEE	979
 
 #define OP_31_XOP_TBEGIN	654
 #define OP_31_XOP_TABORT	910
@@ -416,6 +417,23 @@ int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			vcpu->arch.mmu.slbia(vcpu);
 			break;
+		case OP_31_XOP_SLBFEE:
+			if (!(inst & 1) || !vcpu->arch.mmu.slbfee) {
+				return EMULATE_FAIL;
+			} else {
+				ulong b, t;
+				ulong cr = kvmppc_get_cr(vcpu) & ~CR0_MASK;
+
+				b = kvmppc_get_gpr(vcpu, rb);
+				if (!vcpu->arch.mmu.slbfee(vcpu, b, &t))
+					cr |= 2 << CR0_SHIFT;
+				kvmppc_set_gpr(vcpu, rt, t);
+				/* copy XER[SO] bit to CR0[SO] */
+				cr |= (vcpu->arch.regs.xer & 0x80000000) >>
+					(31 - CR0_SHIFT);
+				kvmppc_set_cr(vcpu, cr);
+			}
+			break;
 		case OP_31_XOP_SLBMFEE:
 			if (!vcpu->arch.mmu.slbmfee) {
 				emulated = EMULATE_FAIL;