author		Jan Kiszka <jan.kiszka@siemens.com>	2013-04-28 10:50:52 +0200
committer	Gleb Natapov <gleb@redhat.com>	2013-04-28 11:44:18 +0200
commit		730dca42c1d363c939da18c1499c7327c66e2b37 (patch)
tree		f94998cccc58814642c4160da1bb75181af6ba68 /arch/x86
parent		kvm, svm: Fix typo in printk message (diff)
KVM: x86: Rework request for immediate exit
The VMX implementation of enable_irq_window raised KVM_REQ_IMMEDIATE_EXIT
after we checked it in vcpu_enter_guest. This caused infinite loops on
vmentry. Fix it by letting enable_irq_window signal the need for an
immediate exit via its return value and drop KVM_REQ_IMMEDIATE_EXIT.

This issue only affects nested VMX scenarios.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
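In short, the callback now reports through its return value whether the caller
must force an exit right after the next entry, instead of raising a request
the caller may already have consumed. The following stand-alone sketch uses
hypothetical struct and function names (it is not the kernel code) to
illustrate the reworked contract between enable_irq_window and
vcpu_enter_guest:

/* Minimal user-space sketch of the reworked contract; names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

#define EBUSY 16	/* stand-in for the kernel's -EBUSY */

struct vcpu {
	bool nested_intr_pending;	/* L2 runs but the pending interrupt targets L1 */
};

/*
 * New-style callback: instead of raising a request flag that the caller may
 * already have checked (the old KVM_REQ_IMMEDIATE_EXIT), report the
 * condition back through the return value.
 */
static int enable_irq_window(struct vcpu *vcpu)
{
	if (vcpu->nested_intr_pending)
		return -EBUSY;	/* caller must make the guest exit right after entry */

	/* ...otherwise arm the interrupt-window exiting control here... */
	return 0;
}

static void vcpu_enter_guest(struct vcpu *vcpu)
{
	/* The caller consumes the return value, so the signal cannot be missed. */
	bool req_immediate_exit = enable_irq_window(vcpu) != 0;

	printf("immediate exit requested: %s\n", req_immediate_exit ? "yes" : "no");
}

int main(void)
{
	struct vcpu v = { .nested_intr_pending = true };

	vcpu_enter_guest(&v);	/* prints "immediate exit requested: yes" */
	return 0;
}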
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	 2
-rw-r--r--	arch/x86/kvm/svm.c	 3
-rw-r--r--	arch/x86/kvm/vmx.c	15
-rw-r--r--	arch/x86/kvm/x86.c	 7
4 files changed, 14 insertions(+), 13 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 18635ae42a8e..111b4a0c3907 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -694,7 +694,7 @@ struct kvm_x86_ops {
 	bool (*get_nmi_mask)(struct kvm_vcpu *vcpu);
 	void (*set_nmi_mask)(struct kvm_vcpu *vcpu, bool masked);
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
-	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
+	int (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
 	int (*vm_has_apicv)(struct kvm *kvm);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 15c9cccd716b..7f896cbe717f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3632,7 +3632,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static int enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -3646,6 +3646,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 		svm_set_vintr(svm);
 		svm_inject_irq(svm, 0x0);
 	}
+	return 0;
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0f0cb3110626..74c525e2c608 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4398,22 +4398,23 @@ static bool nested_exit_on_nmi(struct kvm_vcpu *vcpu)
 		PIN_BASED_NMI_EXITING;
 }
 
-static void enable_irq_window(struct kvm_vcpu *vcpu)
+static int enable_irq_window(struct kvm_vcpu *vcpu)
 {
 	u32 cpu_based_vm_exec_control;
-	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+
+	if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
 		/*
 		 * We get here if vmx_interrupt_allowed() said we can't
-		 * inject to L1 now because L2 must run. Ask L2 to exit
-		 * right after entry, so we can inject to L1 more promptly.
+		 * inject to L1 now because L2 must run. The caller will have
+		 * to make L2 exit right after entry, so we can inject to L1
+		 * more promptly.
 		 */
-		kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
-		return;
-	}
+		return -EBUSY;
 
 	cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
 	cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+	return 0;
 }
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2a434bf3918d..c522260b5bbf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5692,7 +5692,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
-	bool req_immediate_exit = 0;
+	bool req_immediate_exit = false;
 
 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5734,8 +5734,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			record_steal_time(vcpu);
 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
-		req_immediate_exit =
-			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
 			kvm_handle_pmu_event(vcpu);
 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
@@ -5757,7 +5755,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (vcpu->arch.nmi_pending)
 			kvm_x86_ops->enable_nmi_window(vcpu);
 		else if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)
-			kvm_x86_ops->enable_irq_window(vcpu);
+			req_immediate_exit =
+				kvm_x86_ops->enable_irq_window(vcpu) != 0;
 
 		if (kvm_lapic_enabled(vcpu)) {
 			/*