Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h  |  6
-rw-r--r--  arch/powerpc/kvm/Makefile              | 13
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu.c       | 81
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_host.c  | 21
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S       | 13
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c           |  3
-rw-r--r--  arch/powerpc/kvm/booke.c               |  2
-rw-r--r--  arch/powerpc/kvm/emulate.c             |  3
8 files changed, 89 insertions(+), 53 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 349ed85c7d61..08891d07aeb6 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -107,8 +107,9 @@ struct kvmppc_vcpu_book3s {
#define CONTEXT_GUEST 1
#define CONTEXT_GUEST_END 2
-#define VSID_REAL 0x1fffffffffc00000ULL
-#define VSID_BAT 0x1fffffffffb00000ULL
+#define VSID_REAL 0x0fffffffffc00000ULL
+#define VSID_BAT 0x0fffffffffb00000ULL
+#define VSID_1T 0x1000000000000000ULL
#define VSID_REAL_DR 0x2000000000000000ULL
#define VSID_REAL_IR 0x4000000000000000ULL
#define VSID_PR 0x8000000000000000ULL
@@ -123,6 +124,7 @@ extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
+extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
struct kvm_vcpu *vcpu, unsigned long addr,
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 422de3f4d46c..008cd856c5b5 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -5,9 +5,10 @@
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm
+KVM := ../../../virt/kvm
-common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o \
- eventfd.o)
+common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
+ $(KVM)/eventfd.o
CFLAGS_44x_tlb.o := -I.
CFLAGS_e500_mmu.o := -I.
@@ -53,7 +54,7 @@ kvm-e500mc-objs := \
kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
- ../../../virt/kvm/coalesced_mmio.o \
+ $(KVM)/coalesced_mmio.o \
fpu.o \
book3s_paired_singles.o \
book3s_pr.o \
@@ -86,8 +87,8 @@ kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
book3s_xics.o
kvm-book3s_64-module-objs := \
- ../../../virt/kvm/kvm_main.o \
- ../../../virt/kvm/eventfd.o \
+ $(KVM)/kvm_main.o \
+ $(KVM)/eventfd.o \
powerpc.o \
emulate.o \
book3s.o \
@@ -111,7 +112,7 @@ kvm-book3s_32-objs := \
kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
kvm-objs-$(CONFIG_KVM_MPIC) += mpic.o
-kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(addprefix ../../../virt/kvm/, irqchip.o)
+kvm-objs-$(CONFIG_HAVE_KVM_IRQ_ROUTING) += $(KVM)/irqchip.o
kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c
index b871721c0050..739bfbadb85e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu.c
+++ b/arch/powerpc/kvm/book3s_64_mmu.c
@@ -26,6 +26,7 @@
#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
+#include <asm/mmu-hash64.h>
/* #define DEBUG_MMU */
@@ -76,6 +77,24 @@ static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe(
return NULL;
}
+static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe)
+{
+ return slbe->tb ? SID_SHIFT_1T : SID_SHIFT;
+}
+
+static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe)
+{
+ return (1ul << kvmppc_slb_sid_shift(slbe)) - 1;
+}
+
+static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr)
+{
+ eaddr &= kvmppc_slb_offset_mask(slb);
+
+ return (eaddr >> VPN_SHIFT) |
+ ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT));
+}
+
static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
bool data)
{
@@ -85,11 +104,7 @@ static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
if (!slb)
return 0;
- if (slb->tb)
- return (((u64)eaddr >> 12) & 0xfffffff) |
- (((u64)slb->vsid) << 28);
-
- return (((u64)eaddr >> 12) & 0xffff) | (((u64)slb->vsid) << 16);
+ return kvmppc_slb_calc_vpn(slb, eaddr);
}
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
@@ -100,7 +115,8 @@ static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr)
{
int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
- return ((eaddr & 0xfffffff) >> p);
+
+ return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p);
}
static hva_t kvmppc_mmu_book3s_64_get_pteg(
@@ -109,13 +125,15 @@ static hva_t kvmppc_mmu_book3s_64_get_pteg(
bool second)
{
u64 hash, pteg, htabsize;
- u32 page;
+ u32 ssize;
hva_t r;
+ u64 vpn;
- page = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1);
- hash = slbe->vsid ^ page;
+ vpn = kvmppc_slb_calc_vpn(slbe, eaddr);
+ ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M;
+ hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize);
if (second)
hash = ~hash;
hash &= ((1ULL << 39ULL) - 1ULL);
@@ -146,7 +164,7 @@ static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
u64 avpn;
avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
- avpn |= slbe->vsid << (28 - p);
+ avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);
if (p < 24)
avpn >>= ((80 - p) - 56) - 8;
@@ -167,7 +185,6 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
int i;
u8 key = 0;
bool found = false;
- bool perm_err = false;
int second = 0;
ulong mp_ea = vcpu->arch.magic_page_ea;
@@ -190,13 +207,15 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
if (!slbe)
goto no_seg_found;
+ avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
+ if (slbe->tb)
+ avpn |= SLB_VSID_B_1T;
+
do_second:
ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu_book3s, slbe, eaddr, second);
if (kvm_is_error_hva(ptegp))
goto no_page_found;
- avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr);
-
if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) {
printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp);
goto no_page_found;
@@ -219,7 +238,7 @@ do_second:
continue;
/* AVPN compare */
- if (HPTE_V_AVPN_VAL(avpn) == HPTE_V_AVPN_VAL(v)) {
+ if (HPTE_V_COMPARE(avpn, v)) {
u8 pp = (r & HPTE_R_PP) | key;
int eaddr_mask = 0xFFF;
@@ -248,11 +267,6 @@ do_second:
break;
}
- if (!gpte->may_read) {
- perm_err = true;
- continue;
- }
-
dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx "
"-> 0x%lx\n",
eaddr, avpn, gpte->vpage, gpte->raddr);
@@ -281,6 +295,8 @@ do_second:
if (pteg[i+1] != oldr)
copy_to_user((void __user *)ptegp, pteg, sizeof(pteg));
+ if (!gpte->may_read)
+ return -EPERM;
return 0;
} else {
dprintk("KVM MMU: No PTE found (ea=0x%lx sdr1=0x%llx "
@@ -296,13 +312,7 @@ do_second:
}
}
-
no_page_found:
-
-
- if (perm_err)
- return -EPERM;
-
return -ENOENT;
no_seg_found:
@@ -334,7 +344,7 @@ static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb)
slbe->large = (rs & SLB_VSID_L) ? 1 : 0;
slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0;
slbe->esid = slbe->tb ? esid_1t : esid;
- slbe->vsid = rs >> 12;
+ slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16);
slbe->valid = (rb & SLB_ESID_V) ? 1 : 0;
slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0;
slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0;
@@ -375,6 +385,7 @@ static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr)
static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
{
struct kvmppc_slb *slbe;
+ u64 seg_size;
dprintk("KVM MMU: slbie(0x%llx)\n", ea);
@@ -386,8 +397,11 @@ static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea)
dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid);
slbe->valid = false;
+ slbe->orige = 0;
+ slbe->origv = 0;
- kvmppc_mmu_map_segment(vcpu, ea);
+ seg_size = 1ull << kvmppc_slb_sid_shift(slbe);
+ kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size);
}
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
@@ -396,8 +410,11 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
dprintk("KVM MMU: slbia()\n");
- for (i = 1; i < vcpu->arch.slb_nr; i++)
+ for (i = 1; i < vcpu->arch.slb_nr; i++) {
vcpu->arch.slb[i].valid = false;
+ vcpu->arch.slb[i].orige = 0;
+ vcpu->arch.slb[i].origv = 0;
+ }
if (vcpu->arch.shared->msr & MSR_IR) {
kvmppc_mmu_flush_segments(vcpu);
@@ -467,8 +484,14 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid,
if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea);
- if (slb)
+ if (slb) {
gvsid = slb->vsid;
+ if (slb->tb) {
+ gvsid <<= SID_SHIFT_1T - SID_SHIFT;
+ gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1);
+ gvsid |= VSID_1T;
+ }
+ }
}
switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
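Note on the book3s_64_mmu.c hunks above: the new kvmppc_slb_sid_shift()/kvmppc_slb_calc_vpn() helpers make the segment-offset mask and the VSID shift depend on the slbe->tb (1T) bit instead of hard-coding 256M values. Below is a minimal stand-alone sketch of the same arithmetic, not the kernel code itself; the SID_SHIFT/SID_SHIFT_1T/VPN_SHIFT values are assumed to match asm/mmu-hash64.h.

#include <stdint.h>
#include <stdio.h>

/* Assumed mirrors of the mmu-hash64.h constants the patch relies on. */
#define SID_SHIFT	28	/* 256M segment: 28-bit offset */
#define SID_SHIFT_1T	40	/* 1T segment: 40-bit offset */
#define VPN_SHIFT	12	/* VPN counts 4k pages */

/* Same arithmetic as kvmppc_slb_calc_vpn(): keep only the in-segment offset
 * of the effective address, then splice the VSID in above it (in VPN units). */
static uint64_t calc_vpn(uint64_t vsid, uint64_t eaddr, int is_1t)
{
	int sid_shift = is_1t ? SID_SHIFT_1T : SID_SHIFT;
	uint64_t offset = eaddr & ((1ull << sid_shift) - 1);

	return (offset >> VPN_SHIFT) | (vsid << (sid_shift - VPN_SHIFT));
}

int main(void)
{
	uint64_t ea = 0x123456789000ull;

	printf("256M segment: vpn=0x%llx\n", (unsigned long long)calc_vpn(0x10, ea, 0));
	printf("1T segment:   vpn=0x%llx\n", (unsigned long long)calc_vpn(0x10, ea, 1));
	return 0;
}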
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index 3a9a1aceb14f..b350d9494b26 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -301,6 +301,23 @@ out:
return r;
}
+void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
+{
+ struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ ulong seg_mask = -seg_size;
+ int i;
+
+ for (i = 1; i < svcpu->slb_max; i++) {
+ if ((svcpu->slb[i].esid & SLB_ESID_V) &&
+ (svcpu->slb[i].esid & seg_mask) == ea) {
+ /* Invalidate this entry */
+ svcpu->slb[i].esid = 0;
+ }
+ }
+
+ svcpu_put(svcpu);
+}
+
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
@@ -325,9 +342,9 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
return -1;
vcpu3s->context_id[0] = err;
- vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
+ vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
<< ESID_BITS) - 1;
- vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
+ vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu);
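Aside on the proto_vsid change just above: vcpu3s->context_id[0] is an int, so without the added (u64) casts the shift by ESID_BITS is evaluated in 32-bit arithmetic and the upper bits of the proto-VSID can be lost before the result is widened to u64. A stand-alone sketch of the effect follows; the ESID_BITS value is assumed purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define ESID_BITS 18	/* assumed value, for illustration only */

int main(void)
{
	int context_id = 0x12345;	/* a context number large enough to matter */

	/* The shift of an int happens in 32-bit arithmetic: high bits of the
	 * proto-VSID are lost (and overflowing a signed int is undefined). */
	uint64_t truncated = context_id << ESID_BITS;

	/* The (u64) cast added by the patch widens first, then shifts. */
	uint64_t correct = (uint64_t)context_id << ESID_BITS;

	printf("32-bit shift: 0x%llx\n", (unsigned long long)truncated);
	printf("64-bit shift: 0x%llx\n", (unsigned long long)correct);
	return 0;
}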
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 56b983e7b738..4f0caecc0f9d 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -66,10 +66,6 @@ slb_exit_skip_ ## num:
ld r12, PACA_SLBSHADOWPTR(r13)
- /* Save off the first entry so we can slbie it later */
- ld r10, SHADOW_SLB_ESID(0)(r12)
- ld r11, SHADOW_SLB_VSID(0)(r12)
-
/* Remove bolted entries */
UNBOLT_SLB_ENTRY(0)
UNBOLT_SLB_ENTRY(1)
@@ -81,15 +77,10 @@ slb_exit_skip_ ## num:
/* Flush SLB */
+ li r10, 0
+ slbmte r10, r10
slbia
- /* r0 = esid & ESID_MASK */
- rldicr r10, r10, 0, 35
- /* r0 |= CLASS_BIT(VSID) */
- rldic r12, r11, 56 - 36, 36
- or r10, r10, r12
- slbie r10
-
/* Fill SLB with our shadow */
lbz r12, SVCPU_SLB_MAX(r3)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index bdc40b8e77d9..19498a567a81 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -1239,8 +1239,7 @@ out:
#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
- /* No flags */
- info->flags = 0;
+ info->flags = KVM_PPC_1T_SEGMENTS;
/* SLB is always 64 entries */
info->slb_size = 64;
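With book3s_pr.c now setting KVM_PPC_1T_SEGMENTS, userspace can see the capability through the existing KVM_PPC_GET_SMMU_INFO vm ioctl. A rough sketch, error handling omitted and only meaningful on a Book3S PPC host:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	struct kvm_ppc_smmu_info info;
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* KVM_PPC_GET_SMMU_INFO is a vm ioctl that reports host MMU capabilities. */
	if (ioctl(vm, KVM_PPC_GET_SMMU_INFO, &info) == 0 &&
	    (info.flags & KVM_PPC_1T_SEGMENTS))
		printf("1T segments available, slb_size=%u\n", info.slb_size);
	return 0;
}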
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 1a1b51189773..dcc94f016007 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -796,7 +796,7 @@ static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
kvmppc_fill_pt_regs(&regs);
timer_interrupt(&regs);
break;
-#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
+#if defined(CONFIG_PPC_DOORBELL)
case BOOKE_INTERRUPT_DOORBELL:
kvmppc_fill_pt_regs(&regs);
doorbell_exception(&regs);
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 631a2650e4e4..2c52ada30775 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -169,6 +169,9 @@ static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
vcpu->arch.shared->sprg3 = spr_val;
break;
+ /* PIR can legally be written, but we ignore it */
+ case SPRN_PIR: break;
+
default:
emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
spr_val);