author		Longpeng(Mike) <longpeng2@huawei.com>	2017-08-08 06:05:32 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2017-08-08 10:57:43 +0200
commit		199b5763d329b43c88f6ad539db8a6c6b42f8edb (patch)
tree		224f4ebeb9e58c43ea1916e63e813452f81dd45a /arch/arm64/kvm/handle_exit.c
parent		KVM: x86: use general helpers for some cpuid manipulation (diff)
download	linux-199b5763d329b43c88f6ad539db8a6c6b42f8edb.tar.xz
		linux-199b5763d329b43c88f6ad539db8a6c6b42f8edb.zip
KVM: add spinlock optimization framework
If a vcpu exits because it is spinning on a spinlock in user mode, the spinlock holder may have been preempted in either user mode or kernel mode. (Note that not all architectures trap spin loops in user mode; only AMD x86 and ARM/ARM64 currently do.) But if a vcpu exits while in kernel mode, the holder must have been preempted in kernel mode, so a vcpu that is itself in kernel mode is a more likely candidate for being the lock holder.

This introduces kvm_arch_vcpu_in_kernel() to decide whether a vcpu was in kernel mode when it was preempted. kvm_vcpu_on_spin()'s new argument reports the same about the spinning vcpu.

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
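For context, the generic side of this framework (in virt/kvm/kvm_main.c) threads the new flag into the directed-yield candidate loop. The following is a condensed sketch of that idea rather than the verbatim kernel code: the wakeup and directed-yield eligibility checks of the real loop are omitted, and kvm_vcpu_yield_to() is the existing helper that attempts to yield to a specific vcpu.

void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, me->kvm) {
		if (vcpu == me)
			continue;
		/*
		 * If the spinning vcpu was in kernel mode, the lock
		 * holder must have been preempted in kernel mode too,
		 * so skip candidates that were in user mode.
		 */
		if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
			continue;
		if (kvm_vcpu_yield_to(vcpu) > 0)
			break;
	}
}

Architectures that cannot (yet) report the preempted vcpu's mode can simply return false from kvm_arch_vcpu_in_kernel(), which turns the new check into a no-op and preserves the previous behaviour.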
Diffstat (limited to 'arch/arm64/kvm/handle_exit.c')
 -rw-r--r--	arch/arm64/kvm/handle_exit.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 17d8a1677a0b..da57622cacca 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -84,7 +84,7 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
 		vcpu->stat.wfe_exit_stat++;
-		kvm_vcpu_on_spin(vcpu);
+		kvm_vcpu_on_spin(vcpu, false);
 	} else {
 		trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
 		vcpu->stat.wfi_exit_stat++;
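Passing false for the new argument keeps the arm64 WFE path's behaviour unchanged for now: no kernel-mode preference is applied until the architecture reports the spinning vcpu's actual mode. A possible arm64 implementation of the new hook, assuming the existing vcpu_mode_priv() helper (which inspects the guest's saved PSTATE), could look like the sketch below; this commit itself only stubs the hook out to return false.

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	/* A privileged (EL1) guest mode implies the vcpu was in kernel mode. */
	return vcpu_mode_priv(vcpu);
}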