author	Paul Mackerras <paulus@samba.org>	2015-03-28 04:21:11 +0100
committer	Alexander Graf <agraf@suse.de>	2015-04-21 15:21:34 +0200
commit	eddb60fb1443f85c5728f1b1cd4be608c6832a79 (patch)
tree	a4b4cb322d3e067ba4e53825152ff45efaba9022 /arch/powerpc
parent	KVM: PPC: Book3S HV: Streamline guest entry and exit (diff)
KVM: PPC: Book3S HV: Translate kvmhv_commence_exit to C
This replaces the assembler code for kvmhv_commence_exit() with C code in book3s_hv_builtin.c. It also moves the IPI sending code that was in book3s_hv_rm_xics.c into a new kvmhv_rm_send_ipi() function, so it can be used by kvmhv_commence_exit() as well as by icp_rm_set_vcpu_irq().

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
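One point worth noting up front: the new helper performs a bare cache-inhibited store, so callers must order their own memory updates against the IPI themselves. A minimal caller-side sketch in C, mirroring the icp_rm_set_vcpu_irq() change below (cpu stands for the target hardware thread; illustrative, not part of the patch):

	smp_mb();		/* order map/flag updates vs. the IPI */
	kvmhv_rm_send_ipi(cpu);	/* real mode only; writes IPI_PRIORITY to XICS_MFRR */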
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h  |  2
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c      | 63
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_xics.c      | 12
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S   | 66
4 files changed, 75 insertions(+), 68 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 869c53fe02cd..2b84e485a181 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -438,6 +438,8 @@ static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
+extern void kvmhv_rm_send_ipi(int cpu);
+
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 275425142bb7..c42aa55b885f 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -22,6 +22,7 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
+#include <asm/xics.h>
#define KVM_CMA_CHUNK_ORDER 18
@@ -184,3 +185,65 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu)
return H_HARDWARE;
}
+
+static inline void rm_writeb(unsigned long paddr, u8 val)
+{
+ __asm__ __volatile__("stbcix %0,0,%1"
+ : : "r" (val), "r" (paddr) : "memory");
+}
+
+/*
+ * Send an interrupt to another CPU.
+ * This can only be called in real mode.
+ * The caller needs to include any barrier needed to order writes
+ * to memory vs. the IPI/message.
+ */
+void kvmhv_rm_send_ipi(int cpu)
+{
+ unsigned long xics_phys;
+
+ /* Poke the target */
+ xics_phys = paca[cpu].kvm_hstate.xics_phys;
+ rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+}
+
+/*
+ * The following functions are called from the assembly code
+ * in book3s_hv_rmhandlers.S.
+ */
+static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
+{
+ int cpu = vc->pcpu;
+
+ /* Order setting of exit map vs. msgsnd/IPI */
+ smp_mb();
+ for (; active; active >>= 1, ++cpu)
+ if (active & 1)
+ kvmhv_rm_send_ipi(cpu);
+}
+
+void kvmhv_commence_exit(int trap)
+{
+ struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
+ int ptid = local_paca->kvm_hstate.ptid;
+ int me, ee;
+
+ /* Set our bit in the threads-exiting-guest map in the 0xff00
+ bits of vcore->entry_exit_map */
+ me = 0x100 << ptid;
+ do {
+ ee = vc->entry_exit_map;
+ } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);
+
+ /* Are we the first here? */
+ if ((ee >> 8) != 0)
+ return;
+
+ /*
+ * Trigger the other threads in this vcore to exit the guest.
+ * If this is a hypervisor decrementer interrupt then they
+ * will be already on their way out of the guest.
+ */
+ if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
+ kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));
+}
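To make the map arithmetic above concrete, here is a self-contained userspace sketch (purely illustrative; the 0xff entry / 0xff00 exit split assumes at most 8 threads per vcore, which the code above implies):

#include <stdio.h>

int main(void)
{
	unsigned int map = 0x0007;		/* entry map: threads 0-2 in the guest */
	int ptid = 2;				/* we are thread 2 */
	unsigned int me = 0x100 << ptid;	/* our exit-map bit: 0x400 */
	unsigned int ee = map;			/* old value, as cmpxchg() would return it */

	map |= me;				/* entry_exit_map becomes 0x0407 */
	if ((ee >> 8) == 0)			/* old exit map empty: we are first out */
		printf("IPI mask: 0x%x\n", ee & ~(1u << ptid));	/* 0x3 -> threads 0 and 1 */
	return 0;
}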
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 6dded8c75234..00e45b6d4f24 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -26,12 +26,6 @@
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
u32 new_irq);
-static inline void rm_writeb(unsigned long paddr, u8 val)
-{
- __asm__ __volatile__("sync; stbcix %0,0,%1"
- : : "r" (val), "r" (paddr) : "memory");
-}
-
/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
struct kvmppc_ics *ics, struct kvmppc_icp *icp)
@@ -60,7 +54,6 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
struct kvm_vcpu *this_vcpu)
{
struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
- unsigned long xics_phys;
int cpu;
/* Mark the target VCPU as having an interrupt pending */
@@ -83,9 +76,8 @@ static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
/* In SMT cpu will always point to thread 0, we adjust it */
cpu += vcpu->arch.ptid;
- /* Not too hard, then poke the target */
- xics_phys = paca[cpu].kvm_hstate.xics_phys;
- rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+ smp_mb();
+ kvmhv_rm_send_ipi(cpu);
}
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
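A subtlety in this hunk: the removed rm_writeb() fused a sync into the store ("sync; stbcix"), whereas the shared helper added in book3s_hv_builtin.c uses a bare stbcix and leaves ordering to the caller's smp_mb() (which expands to sync on SMP Book3S-64). Side by side, as a kernel-context sketch (types from linux/types.h; neither form is runnable from userspace):

#include <linux/types.h>

/* old, file-local variant: barrier fused into the cache-inhibited store */
static inline void rm_writeb_fused(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("sync; stbcix %0,0,%1"
			     : : "r" (val), "r" (paddr) : "memory");
}

/* new shared variant: caller issues smp_mb() first, then the bare store */
static inline void rm_writeb_bare(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
			     : : "r" (val), "r" (paddr) : "memory");
}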
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 3f6fd78cccd2..fcf3a617cc8a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -264,7 +264,11 @@ kvm_novcpu_exit:
addi r3, r4, VCPU_TB_RMEXIT
bl kvmhv_accumulate_time
#endif
-13: bl kvmhv_commence_exit
+13: mr r3, r12
+ stw r12, 112-4(r1)
+ bl kvmhv_commence_exit
+ nop
+ lwz r12, 112-4(r1)
b kvmhv_switch_to_host
/*
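Why the stw/lwz around the call in the hunk above: kvmhv_commence_exit() is ordinary C after this patch, so volatile registers are clobbered across the call; r12 (the trap number) is saved and restored here, while the call site in the next hunk reloads r9 and r12 from the PACA and vcpu struct instead. Restating the C interface the assembly now targets (nothing assumed beyond the patch):

	void kvmhv_commence_exit(int trap);	/* trap arrives in r3 per the ABI */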
@@ -1161,6 +1165,9 @@ mc_cont:
/* Increment exit count, poke other threads to exit */
bl kvmhv_commence_exit
+ nop
+ ld r9, HSTATE_KVM_VCPU(r13)
+ lwz r12, VCPU_TRAP(r9)
/* Save guest CTRL register, set runlatch to 1 */
mfspr r6,SPRN_CTRLF
@@ -1614,63 +1621,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
mtlr r0
blr
-kvmhv_commence_exit: /* r12 = trap, r13 = paca, doesn't trash r9 */
- mflr r0
- std r0, PPC_LR_STKOFF(r1)
- stdu r1, -PPC_MIN_STKFRM(r1)
-
- /* Set our bit in the threads-exiting-guest map in the 0xff00
- bits of vcore->entry_exit_map */
- ld r5, HSTATE_KVM_VCORE(r13)
- lbz r4, HSTATE_PTID(r13)
- li r7, 0x100
- sld r7, r7, r4
- addi r6, r5, VCORE_ENTRY_EXIT
-41: lwarx r3, 0, r6
- or r0, r3, r7
- stwcx. r0, 0, r6
- bne 41b
- isync /* order stwcx. vs. reading napping_threads */
-
- /*
- * At this point we have an interrupt that we have to pass
- * up to the kernel or qemu; we can't handle it in real mode.
- * Thus we have to do a partition switch, so we have to
- * collect the other threads, if we are the first thread
- * to take an interrupt. To do this, we send a message or
- * IPI to all the threads that have their bit set in the entry
- * map in vcore->entry_exit_map (other than ourselves).
- * However, we don't need to bother if this is an HDEC
- * interrupt, since the other threads will already be on their
- * way here in that case.
- */
- cmpwi r3,0x100 /* Are we the first here? */
- bge 43f
- cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
- beq 43f
-
- srwi r0,r7,8
- andc. r3,r3,r0 /* no sense IPI'ing ourselves */
- beq 43f
- /* Order entry/exit update vs. IPIs */
- sync
- mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
- subf r6,r4,r13
-42: andi. r0,r3,1
- beq 44f
- ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
- li r0,IPI_PRIORITY
- li r7,XICS_MFRR
- stbcix r0,r7,r8 /* trigger the IPI */
-44: srdi. r3,r3,1
- addi r6,r6,PACA_SIZE
- bne 42b
-
-43: ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
- addi r1, r1, PPC_MIN_STKFRM
- mtlr r0
- blr
-
/*
* Check whether an HDSI is an HPTE not found fault or something else.
* If it is an HPTE not found fault that is due to the guest accessing