author     Paolo Bonzini <pbonzini@redhat.com>  2017-04-27 14:33:43 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>  2017-04-27 14:36:44 +0200
commit     7a97cec26b94c909f4cbad2dc3186af3e457a522 (patch)
tree       ce711b80c36bb3c2828cc63b9f65187fa95e2305 /virt
parent     KVM: return if kvm_vcpu_wake_up() did wake up the VCPU (diff)
KVM: mark requests that need synchronization
kvm_make_all_requests() provides a synchronization that waits until all kicked VCPUs have acknowledged the kick. This is important for KVM_REQ_MMU_RELOAD as it prevents freeing while lockless paging is underway.

This patch adds the synchronization property into all requests that are currently being used with kvm_make_all_requests() in order to preserve the current behavior and only introduce a new framework. Removing it from requests where it is not necessary is left for future patches.

Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
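For illustration, the "synchronization property" is a flag bit OR'd into the request number, which is why the code below can test it with req & KVM_REQUEST_WAIT. A minimal sketch of how a request definition carries the flag (the bit positions and the KVM_REQ_MMU_RELOAD value shown here are illustrative assumptions, not quoted from this patch):

    /* Sketch: flag bits sit above the request number (illustrative values). */
    #define KVM_REQUEST_NO_WAKEUP  BIT(8)  /* kick the VCPU, but do not wake it from halt */
    #define KVM_REQUEST_WAIT       BIT(9)  /* wait until the kicked VCPU acknowledges */

    /* A request that needs synchronization is defined with KVM_REQUEST_WAIT set: */
    #define KVM_REQ_MMU_RELOAD     (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)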
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/kvm_main.c  |  25
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 632f7b3e198c..035bc51f656f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -165,6 +165,24 @@ void vcpu_put(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(vcpu_put);
+/* TODO: merge with kvm_arch_vcpu_should_kick */
+static bool kvm_request_needs_ipi(struct kvm_vcpu *vcpu, unsigned req)
+{
+ int mode = kvm_vcpu_exiting_guest_mode(vcpu);
+
+ /*
+ * We need to wait for the VCPU to reenable interrupts and get out of
+ * READING_SHADOW_PAGE_TABLES mode.
+ */
+ if (req & KVM_REQUEST_WAIT)
+ return mode != OUTSIDE_GUEST_MODE;
+
+ /*
+ * Need to kick a running VCPU, but otherwise there is nothing to do.
+ */
+ return mode == IN_GUEST_MODE;
+}
+
static void ack_flush(void *_completed)
{
}
@@ -174,6 +192,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
int i, cpu, me;
cpumask_var_t cpus;
bool called = true;
+ bool wait = req & KVM_REQUEST_WAIT;
struct kvm_vcpu *vcpu;
zalloc_cpumask_var(&cpus, GFP_ATOMIC);
@@ -187,13 +206,13 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req)
continue;
if (cpus != NULL && cpu != -1 && cpu != me &&
- kvm_vcpu_exiting_guest_mode(vcpu) != OUTSIDE_GUEST_MODE)
+ kvm_request_needs_ipi(vcpu, req))
cpumask_set_cpu(cpu, cpus);
}
if (unlikely(cpus == NULL))
- smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
+ smp_call_function_many(cpu_online_mask, ack_flush, NULL, wait);
else if (!cpumask_empty(cpus))
- smp_call_function_many(cpus, ack_flush, NULL, 1);
+ smp_call_function_many(cpus, ack_flush, NULL, wait);
else
called = false;
put_cpu();
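As a usage sketch, a caller that relies on the wait semantics looks roughly like the following (simplified and hypothetical, modeled on the remote TLB flush path rather than copied from the kernel source). The point is that once kvm_make_all_cpus_request() returns for a request marked KVM_REQUEST_WAIT, every targeted VCPU has either left guest mode or will observe the request before touching guest state again:

    /* Simplified, hypothetical caller that depends on the wait semantics. */
    void example_flush_remote_tlbs(struct kvm *kvm)
    {
    	/*
    	 * Assuming KVM_REQ_TLB_FLUSH carries KVM_REQUEST_WAIT: when this
    	 * call returns, no VCPU is still running on stale translations,
    	 * because the IPI was delivered with wait=1.
    	 */
    	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
    		++kvm->stat.remote_tlb_flush;
    }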