author    Christoffer Dall <christoffer.dall@linaro.org>  2016-05-20 15:25:28 +0200
committer Christoffer Dall <christoffer.dall@linaro.org>  2016-05-20 16:26:38 +0200
commit    35a2d58588f0992627e74b447ccab21570544c86 (patch)
tree      823643adbca0b1aece727a932ea0cbe4c5809321 /arch
parent    KVM: arm/arm64: vgic-new: enable build (diff)
download  linux-35a2d58588f0992627e74b447ccab21570544c86.tar.xz
          linux-35a2d58588f0992627e74b447ccab21570544c86.zip
KVM: arm/arm64: vgic-new: Synchronize changes to active state
When modifying the active state of an interrupt via the MMIO interface, we should ensure that the write has the intended effect.

If a guest sets an interrupt to active, but that interrupt is already flushed into a list register on a running VCPU, then that VCPU will write the active state back into the struct vgic_irq upon returning from the guest and syncing its state. This is a non-benign race, because the guest can observe that an interrupt is not active, can have a reasonable expectation that other VCPUs will not ack any IRQs, and can then set the state to active and expect it to stay that way. Currently we are not honoring this case.

Therefore, change both the SACTIVE and CACTIVE MMIO handlers to stop the world, change the IRQ state, potentially queue the IRQ if we're setting it to active, and then continue.

We take this chance to slightly optimize these functions by not stopping the world when touching private interrupts, where there is inherently no possible race.

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
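For context, here is a minimal sketch of the stop-the-world pattern the message describes, assuming the per-VCPU halt/resume primitives this patch adds. Only kvm_arm_halt_vcpu()/kvm_arm_resume_vcpu() and the existing kvm_arm_halt_guest()/kvm_arm_resume_guest() come from the kernel; vgic_update_irq_active() and vgic_queue_irq() are hypothetical stand-ins for the vgic-new helpers, not the actual handler code from this series:

/*
 * Illustrative sketch only: vgic_update_irq_active() and
 * vgic_queue_irq() are hypothetical stand-ins; the halt/resume
 * primitives are the ones added by this patch.
 */
static void change_active_stop_world(struct kvm_vcpu *vcpu, u32 intid,
				     bool active)
{
	bool priv = intid < VGIC_NR_PRIVATE_IRQS;

	/*
	 * A private interrupt can only sit in a list register of its
	 * owning VCPU, so halting that single VCPU is enough; a shared
	 * interrupt may be live on any VCPU, so stop them all.
	 */
	if (priv)
		kvm_arm_halt_vcpu(vcpu);
	else
		kvm_arm_halt_guest(vcpu->kvm);

	vgic_update_irq_active(vcpu->kvm, intid, active);
	if (active)
		vgic_queue_irq(vcpu->kvm, intid);  /* re-queue if now active */

	if (priv)
		kvm_arm_resume_vcpu(vcpu);
	else
		kvm_arm_resume_guest(vcpu->kvm);
}

With every VCPU that could hold the interrupt in a list register parked outside the guest, the write to struct vgic_irq cannot be overwritten by a concurrent sync, which is exactly the race the message describes.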
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/include/asm/kvm_host.h   | 2 ++
-rw-r--r--  arch/arm/kvm/arm.c                | 8 +++++++-
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 2 ++
3 files changed, 11 insertions(+), 1 deletion(-)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 832be033e654..987da9fb14d8 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -229,6 +229,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index e89329d22c18..5a599c208f23 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -502,7 +502,13 @@ void kvm_arm_halt_guest(struct kvm *kvm)
kvm_make_all_cpus_request(kvm, KVM_REQ_VCPU_EXIT);
}
-static void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pause = true;
+ kvm_vcpu_kick(vcpu);
+}
+
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
{
struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fa94f91812ab..38746d24466d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -329,6 +329,8 @@ struct kvm_vcpu *kvm_arm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
void kvm_arm_halt_guest(struct kvm *kvm);
void kvm_arm_resume_guest(struct kvm *kvm);
+void kvm_arm_halt_vcpu(struct kvm_vcpu *vcpu);
+void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu);
u64 __kvm_call_hyp(void *hypfn, ...);
#define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__)
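The arch/arm/kvm/arm.c hunk above is truncated after the opening of kvm_arm_resume_vcpu(); its body is not shown. Presumably it mirrors the existing kvm_arm_resume_guest() path, i.e. something like the following (an assumption based on the per-guest variant, not taken from the visible diff):

/* Assumed body, mirroring kvm_arm_resume_guest(); not part of the
 * visible hunk. */
void kvm_arm_resume_vcpu(struct kvm_vcpu *vcpu)
{
	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);

	vcpu->arch.pause = false;
	swake_up(wq);
}

The design follows the existing per-guest primitives: setting vcpu->arch.pause and kicking the VCPU forces an exit from the guest, and the run loop then parks the VCPU on its wait queue until pause is cleared and the queue is woken, which is what makes the MMIO handler's state change safe.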