author    Christoffer Dall <c.dall@virtualopensystems.com>    2013-01-21 00:28:09 +0100
committer Christoffer Dall <c.dall@virtualopensystems.com>    2013-01-23 19:29:13 +0100
commit    5b3e5e5bf230f56309706dfc05fc0cb173cc83aa (patch)
tree      da2ed7c2553526469c802c2a7903b1595b8be3ce /arch/arm/kvm/emulate.c
parent    KVM: ARM: World-switch implementation (diff)
download  linux-5b3e5e5bf230f56309706dfc05fc0cb173cc83aa.tar.xz
          linux-5b3e5e5bf230f56309706dfc05fc0cb173cc83aa.zip
KVM: ARM: Emulation framework and CP15 emulation

Adds a new central function to the KVM/ARM code, handle_exit(), which is
called from kvm_arch_vcpu_ioctl_run() on every return from guest
execution. This function examines the Hyp Syndrome Register (HSR), which
tells KVM what caused the exit from the guest. One class of exit reasons
is CP15 accesses, which are not allowed directly from the guest; this
commit handles those exits by emulating the intended operation in
software and skipping the trapped guest instruction.

Minor notes about the coproc register reset:
1) We reserve a value of 0 as an invalid cp15 offset, to catch bugs in
   our table, at a cost of 4 bytes per vcpu.
2) Comments were added to the table indicating how each register is
   handled, for ease of understanding.

Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Christoffer Dall <c.dall@virtualopensystems.com>
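At its core, the exit path this commit introduces is a table of handlers
indexed by the HSR exception class (EC) field, HSR[31:26]. Below is a
minimal sketch of that dispatch pattern; it is illustrative only — the
handler names and table contents follow this series' conventions, and the
real table lives in arch/arm/kvm/arm.c:

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);

static exit_handle_fn arm_exit_handlers[] = {
	[HSR_EC_WFI]		= kvm_handle_wfi,	/* guest executed WFI */
	[HSR_EC_CP15_32]	= kvm_handle_cp15_32,	/* 32-bit CP15 access */
	[HSR_EC_CP15_64]	= kvm_handle_cp15_64,	/* 64-bit CP15 access */
};

static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT;	/* HSR[31:26] */

	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
	    !arm_exit_handlers[hsr_ec]) {
		kvm_err("Unknown exception class: %#08lx\n", hsr_ec);
		return -EINVAL;
	}

	/* > 0 means "resume the guest"; <= 0 returns to userspace. */
	return arm_exit_handlers[hsr_ec](vcpu, run);
}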
Diffstat (limited to 'arch/arm/kvm/emulate.c')
-rw-r--r--  arch/arm/kvm/emulate.c  218
1 file changed, 218 insertions(+), 0 deletions(-)
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 3eadc25e95de..d61450ac6665 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -16,7 +16,13 @@
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#include <linux/mm.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
+#include <trace/events/kvm.h>
+
+#include "trace.h"
#define VCPU_NR_MODES 6
#define VCPU_REG_OFFSET_USR 0
@@ -153,3 +159,215 @@ u32 *vcpu_spsr(struct kvm_vcpu *vcpu)
BUG();
}
}
+
+/**
+ * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest
+ * @vcpu: the vcpu pointer
+ * @run: the kvm_run structure pointer
+ *
+ * Simply calls kvm_vcpu_block(), which halts execution of world switches
+ * and schedules other host processes until there is an incoming IRQ or FIQ
+ * for the VM.
+ */
+int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run)
+{
+ trace_kvm_wfi(*vcpu_pc(vcpu));
+ kvm_vcpu_block(vcpu);
+ return 1;
+}
+
+/**
+ * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu: The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb If-Then
+ * (IT) blocks, the ITSTATE field of the CPSR is not advanced (updated), so
+ * we have to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:2] -> CPSR[15:10], IT[1:0] -> CPSR[26:25]
+ */
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+ unsigned long itbits, cond;
+ unsigned long cpsr = *vcpu_cpsr(vcpu);
+ bool is_arm = !(cpsr & PSR_T_BIT);
+
+ BUG_ON(is_arm && (cpsr & PSR_IT_MASK));
+
+ if (!(cpsr & PSR_IT_MASK))
+ return;
+
+ cond = (cpsr & 0xe000) >> 13;
+ itbits = (cpsr & 0x1c00) >> (10 - 2);
+ itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+ /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
+ if ((itbits & 0x7) == 0)
+ itbits = cond = 0;
+ else
+ itbits = (itbits << 1) & 0x1f;
+
+ cpsr &= ~PSR_IT_MASK;
+ cpsr |= cond << 13;
+ cpsr |= (itbits & 0x1c) << (10 - 2);
+ cpsr |= (itbits & 0x3) << 25;
+ *vcpu_cpsr(vcpu) = cpsr;
+}
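+
+/*
+ * Worked example of the advance step: "ITT NE" encodes
+ * ITSTATE = firstcond:mask = 0001:1100.  After the first instruction of
+ * the block retires, IT[2:0] = 100 != 0, so IT[4:0] is shifted left and
+ * ITSTATE becomes 0001:1000 (one conditional instruction left).  After
+ * the second, IT[2:0] == 0 and the whole field is cleared, ending the
+ * block.
+ */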
+
+/**
+ * kvm_skip_instr - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ */
+void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+ bool is_thumb;
+
+ is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT);
+ if (is_thumb && !is_wide_instr)
+ *vcpu_pc(vcpu) += 2;
+ else
+ *vcpu_pc(vcpu) += 4;
+ kvm_adjust_itstate(vcpu);
+}
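+
+/*
+ * Callers typically take the width of the trapped instruction from the
+ * HSR.IL bit (HSR[25]): set for a 32-bit instruction, clear for a 16-bit
+ * Thumb instruction, e.g.:
+ *
+ *	kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
+ */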
+
+
+/******************************************************************************
+ * Inject exceptions into the guest
+ */
+
+static u32 exc_vector_base(struct kvm_vcpu *vcpu)
+{
+ u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+ u32 vbar = vcpu->arch.cp15[c12_VBAR];
+
+ if (sctlr & SCTLR_V)
+ return 0xffff0000;
+ else /* the guest always has the Security Extensions, so VBAR is valid */
+ return vbar;
+}
+
+/**
+ * kvm_inject_undefined - inject an undefined exception into the guest
+ * @vcpu: The VCPU to receive the undefined exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ *
+ * Modelled after TakeUndefInstrException() pseudocode.
+ */
+void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+ u32 new_lr_value;
+ u32 new_spsr_value;
+ u32 cpsr = *vcpu_cpsr(vcpu);
+ u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+ bool is_thumb = (cpsr & PSR_T_BIT);
+ u32 vect_offset = 4;
+ u32 return_offset = (is_thumb) ? 2 : 4;
+
+ new_spsr_value = cpsr;
+ new_lr_value = *vcpu_pc(vcpu) - return_offset;
+
+ *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
+ *vcpu_cpsr(vcpu) |= PSR_I_BIT;
+ *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+ if (sctlr & SCTLR_TE)
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+ if (sctlr & SCTLR_EE)
+ *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+ /* Note: These now point to UND banked copies */
+ *vcpu_spsr(vcpu) = cpsr;
+ *vcpu_reg(vcpu, 14) = new_lr_value;
+
+ /* Branch to exception vector */
+ *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
+ * pseudocode.
+ */
+static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
+{
+ u32 new_lr_value;
+ u32 new_spsr_value;
+ u32 cpsr = *vcpu_cpsr(vcpu);
+ u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+ bool is_thumb = (cpsr & PSR_T_BIT);
+ u32 vect_offset;
+ u32 return_offset = (is_thumb) ? 4 : 0;
+ bool is_lpae;
+
+ new_spsr_value = cpsr;
+ new_lr_value = *vcpu_pc(vcpu) + return_offset;
+
+ *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
+ *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
+ *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+ if (sctlr & SCTLR_TE)
+ *vcpu_cpsr(vcpu) |= PSR_T_BIT;
+ if (sctlr & SCTLR_EE)
+ *vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+ /* Note: These now point to ABT banked copies */
+ *vcpu_spsr(vcpu) = cpsr;
+ *vcpu_reg(vcpu, 14) = new_lr_value;
+
+ if (is_pabt)
+ vect_offset = 12;
+ else
+ vect_offset = 16;
+
+ /* Branch to exception vector */
+ *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
+
+ if (is_pabt) {
+ /* Set IFAR and IFSR */
+ vcpu->arch.cp15[c6_IFAR] = addr;
+ is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+ /* Always give debug fault for now - should give guest a clue */
+ if (is_lpae)
+ vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
+ else
+ vcpu->arch.cp15[c5_IFSR] = 2;
+ } else { /* !iabt */
+ /* Set DFAR and DFSR */
+ vcpu->arch.cp15[c6_DFAR] = addr;
+ is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+ /* Always give debug fault for now - should give guest a clue */
+ if (is_lpae)
+ vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
+ else
+ vcpu->arch.cp15[c5_DFSR] = 2;
+ }
+}
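+
+/*
+ * On the fault-status values above: with LPAE (TTBCR.EAE, bit 31) the
+ * long-descriptor FSR format applies, where bit 9 marks the format and
+ * status code 0x22 means "debug event".  Without LPAE the
+ * short-descriptor format applies, where FS = 0b00010 likewise means
+ * "debug event".
+ */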
+
+/**
+ * kvm_inject_dabt - inject a data abort into the guest
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ inject_abt(vcpu, false, addr);
+}
+
+/**
+ * kvm_inject_pabt - inject a prefetch abort into the guest
+ * @vcpu: The VCPU to receive the prefetch abort
+ * @addr: The address to report in the IFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+ inject_abt(vcpu, true, addr);
+}
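
Taken together, a CP15 trap handler built on these helpers decodes the
access from the HSR, performs it against the shadow cp15 state, and then
either steps past the trapped instruction or injects an exception into
the guest. A hypothetical sketch follows; emulate_cp15() is an
illustrative stand-in, as the series' real decode and emulation logic
lives in arch/arm/kvm/coproc.c:

static int kvm_handle_cp15_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long hsr = vcpu->arch.hsr;

	if (!emulate_cp15(vcpu, hsr)) {
		/* Unknown or disallowed register: let the guest know. */
		kvm_inject_undefined(vcpu);
		return 1;
	}

	/* Emulation succeeded: step past the trapped MCR/MRC. */
	kvm_skip_instr(vcpu, (hsr >> 25) & 1);	/* HSR.IL: 32- vs 16-bit */
	return 1;				/* resume the guest */
}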