author		Alexander Graf <agraf@suse.de>	2014-05-15 14:36:05 +0200
committer	Alexander Graf <agraf@suse.de>	2014-05-30 14:26:30 +0200
commit		207438d4e21e05728a8a58b5e25b0f6553260068 (patch)
tree		e26c500f44108f4ea3528988459b76ae3bf5d2a3 /arch/powerpc
parent		KVM: PPC: Book3S HV: Fix machine check delivery to guest (diff)
KVM: PPC: Book3S PR: Use SLB entry 0
We didn't make use of SLB entry 0 for no good reason. SLB entry 0 is always
taken by the Linux linear-mapping SLB entry on the host, so the fact that
slbia does not invalidate it doesn't matter: we overwrite SLB entry 0 on
exit anyway.

Just enable use of SLB entry 0 for our shadow SLB code.
Signed-off-by: Alexander Graf <agraf@suse.de>
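
To make the bookkeeping change concrete, here is a simplified C sketch of the
slot search as it behaves after this patch. It is not the kernel source: the
struct layout is reduced from the real kvmppc_mmu_next_segment(), and the
function name is shortened for illustration.

#define SLB_ESID_V	0x0000000008000000UL	/* entry-valid bit (simplified) */
#define ESID_MASK	0xfffffffff0000000UL	/* effective segment ID bits */

struct shadow_slbe {
	unsigned long esid;
	unsigned long vsid;
};

/* Find a shadow SLB slot for esid; every index, including 0, is a candidate. */
static int next_segment(struct shadow_slbe *slb, int slb_max, unsigned long esid)
{
	int found_inval = -1;
	int i;

	for (i = 0; i < slb_max; i++) {		/* started at i = 1 before the patch */
		if (!(slb[i].esid & SLB_ESID_V))
			found_inval = i;	/* remember an invalidated slot */
		else if ((slb[i].esid & ESID_MASK) == esid)
			return i;		/* segment already mapped: reuse its slot */
	}

	/* Reuse a previously invalidated slot; index 0 now qualifies, hence >= 0 */
	if (found_inval >= 0)
		return found_inval;

	/* No free slot yet: extend the used range (capacity check omitted) */
	return slb_max;
}

Likewise, flushing the shadow SLB now resets slb_max to 0 rather than 1, so
the search starts from a completely empty table.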
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/kvm/book3s_64_mmu_host.c | 11 ++++-------
 arch/powerpc/kvm/book3s_64_slb.S      |  3 ++-
 2 files changed, 6 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index e2efb85c65a3..0ac98392f363 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -271,11 +271,8 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 	int found_inval = -1;
 	int r;
 
-	if (!svcpu->slb_max)
-		svcpu->slb_max = 1;
-
 	/* Are we overwriting? */
-	for (i = 1; i < svcpu->slb_max; i++) {
+	for (i = 0; i < svcpu->slb_max; i++) {
 		if (!(svcpu->slb[i].esid & SLB_ESID_V))
 			found_inval = i;
 		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
@@ -285,7 +282,7 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
 	}
 
 	/* Found a spare entry that was invalidated before */
-	if (found_inval > 0) {
+	if (found_inval >= 0) {
 		r = found_inval;
 		goto out;
 	}
@@ -359,7 +356,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
 	ulong seg_mask = -seg_size;
 	int i;
 
-	for (i = 1; i < svcpu->slb_max; i++) {
+	for (i = 0; i < svcpu->slb_max; i++) {
 		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
 		    (svcpu->slb[i].esid & seg_mask) == ea) {
 			/* Invalidate this entry */
@@ -373,7 +370,7 @@ void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
 void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->slb_max = 1;
+	svcpu->slb_max = 0;
 	svcpu->slb[0].esid = 0;
 	svcpu_put(svcpu);
 }
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 596140e5c889..84c52c6b5837 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -138,7 +138,8 @@ slb_do_enter:
 
 	/* Restore bolted entries from the shadow and fix it along the way */
 
-	/* We don't store anything in entry 0, so we don't need to take care of it */
+	li	r0, 0
+	slbmte	r0, r0
 	slbia
 	isync
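
The assembly change is what makes entry 0 safe to hand out: as the commit
message notes, slbia does not invalidate SLB entry 0, so a guest mapping
parked there has to be cleared explicitly. Writing an all-zero entry with
slbmte does exactly that before slbia drops the rest. A minimal sketch of the
sequence as C inline assembly, with a made-up wrapper name (the real code is
the assembly above in book3s_64_slb.S):

/* Clear the whole shadow SLB, including entry 0, which slbia leaves alone. */
static inline void clear_slb_entry0_then_rest(void)
{
	unsigned long zero = 0;

	asm volatile(
		"slbmte	%0,%0\n\t"	/* RS = RB = 0: write entry 0 as invalid */
		"slbia\n\t"		/* invalidate all other SLB entries */
		"isync"			/* synchronize before new translations */
		: : "r" (zero) : "memory");
}

The host's bolted entries, including the linear mapping that normally lives
in entry 0, are rewritten immediately afterwards, which is why a stale guest
entry 0 was already harmless on exit.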