author | Longpeng(Mike) <longpeng2@huawei.com> | 2017-08-08 06:05:32 +0200 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2017-08-08 10:57:43 +0200 |
commit | 199b5763d329b43c88f6ad539db8a6c6b42f8edb (patch) | |
tree | 224f4ebeb9e58c43ea1916e63e813452f81dd45a /include | |
parent | KVM: x86: use general helpers for some cpuid manipulation (diff) | |
download | linux-199b5763d329b43c88f6ad539db8a6c6b42f8edb.tar.xz linux-199b5763d329b43c88f6ad539db8a6c6b42f8edb.zip | |
KVM: add spinlock optimization framework
If a vcpu exits because it is spinning on a user-mode spinlock, the lock holder may have been preempted in either user mode or kernel mode. (Note that not all architectures trap spin loops in user mode; only AMD x86 and ARM/ARM64 currently do.)

But if a vcpu exits while spinning in kernel mode, the holder must have been preempted in kernel mode, so a vcpu that is itself in kernel mode is the more likely candidate for the lock holder and should be preferred as the yield target.

This introduces kvm_arch_vcpu_in_kernel() to decide whether a vcpu was in kernel mode when it was preempted; kvm_vcpu_on_spin()'s new argument conveys the same information about the spinning vcpu.
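To make the heuristic concrete, here is a minimal sketch of how the new flag could gate candidate selection during directed yield. This is not the upstream implementation (the real kvm_vcpu_on_spin() in virt/kvm/kvm_main.c also tracks a round-robin starting point and boost-eligibility state); it only illustrates the filtering described above, using the declarations added by this patch.

```c
#include <linux/kvm_host.h>

/* Simplified sketch, not the actual kvm_vcpu_on_spin() body. */
void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool usermode_vcpu_not_eligible)
{
	struct kvm *kvm = me->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (vcpu == me)
			continue;
		/*
		 * If the spinner was in kernel mode, the lock holder must
		 * have been preempted in kernel mode too, so skip vcpus
		 * that were running in user mode when they were preempted.
		 */
		if (usermode_vcpu_not_eligible && !kvm_arch_vcpu_in_kernel(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}
```

Passing false keeps the previous behaviour: every preempted vcpu remains a yield candidate.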
Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/kvm_host.h | 3 |
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 28112d7917c1..6882538eda32 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -720,7 +720,7 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
 bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
 void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
 int kvm_vcpu_yield_to(struct kvm_vcpu *target);
-void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
+void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
 
@@ -800,6 +800,7 @@ int kvm_arch_hardware_setup(void);
 void kvm_arch_hardware_unsetup(void);
 void kvm_arch_check_processor_compat(void *rtn);
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
 
 #ifndef __KVM_HAVE_ARCH_VM_ALLOC
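Since this diffstat is limited to include/, the hunks above only add the declaration. As an illustration (not part of this commit), an architecture with a cheap way to query guest privilege level could implement the hook along these lines; the kvm_x86_ops->get_cpl access shown here is an assumed x86-style example.

```c
#include <linux/kvm_host.h>

/*
 * Illustrative only -- not part of this include/-only diff.  Report
 * kernel mode when the guest was running at CPL 0 (x86-style example).
 */
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return kvm_x86_ops->get_cpl(vcpu) == 0;
}
```

Architectures without such a check can simply return false, which keeps the old behaviour of treating every preempted vcpu as an equally likely lock holder.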