author		David Matlack <dmatlack@google.com>	2022-11-17 01:16:56 +0100
committer	Paolo Bonzini <pbonzini@redhat.com>	2022-11-17 16:49:14 +0100
commit		175d5dc79dcb58d18e11192656eefa27abb3fd33 (patch)
tree		06b7980324a976c97ea31b9660b5b6b57bea7b27 /virt
parent		KVM: Cap vcpu->halt_poll_ns before halting rather than after (diff)
KVM: Avoid re-reading kvm->max_halt_poll_ns during halt-polling
Avoid re-reading kvm->max_halt_poll_ns multiple times during
halt-polling except when it is explicitly useful, e.g. to check if the
max time changed across a halt. kvm->max_halt_poll_ns can be changed at
any time by userspace via KVM_CAP_HALT_POLL.
This bug is unlikely to cause any serious side-effects. In the worst
case one halt polls for shorter or longer than it should, and then is
fixed up on the next halt. Furthermore, this is still possible since
kvm->max_halt_poll_ns changes are not synchronized with halts.
Fixes: acd05785e48c ("kvm: add capability for halt polling")
Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20221117001657.1067231-3-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
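To illustrate the pattern the patch relies on, the self-contained sketch below (not part of the patch; the struct, helper names, and grow/shrink constants are invented for illustration) shows the "read the shared limit once into a local, then reuse the snapshot" idiom, using C11 atomics in place of the kernel's READ_ONCE():

/*
 * Illustrative only: names and constants are not KVM's. The point is that
 * the shared limit is loaded exactly once, so every comparison in one pass
 * of the grow/shrink logic sees the same value even if another thread
 * (userspace via KVM_CAP_HALT_POLL) changes it concurrently.
 */
#include <stdatomic.h>
#include <stdio.h>

struct halt_ctl {
	_Atomic unsigned int max_halt_poll_ns;	/* may change at any time */
	unsigned int halt_poll_ns;		/* per-vCPU adaptive poll window */
};

static void adjust_poll_ns(struct halt_ctl *hc, unsigned long long halt_ns)
{
	/* Snapshot the shared limit once so every check below agrees. */
	unsigned int max = atomic_load(&hc->max_halt_poll_ns);

	if (!max)
		hc->halt_poll_ns = 0;		/* polling disabled */
	else if (hc->halt_poll_ns && halt_ns > max)
		hc->halt_poll_ns /= 2;		/* long block: shrink */
	else if (hc->halt_poll_ns < max && halt_ns < max)
		hc->halt_poll_ns = hc->halt_poll_ns ? 2 * hc->halt_poll_ns : 10000;	/* grow */
}

int main(void)
{
	struct halt_ctl hc = { .halt_poll_ns = 0 };

	atomic_store(&hc.max_halt_poll_ns, 200000);
	adjust_poll_ns(&hc, 50000);		/* short halt: poll window grows */
	printf("halt_poll_ns = %u\n", hc.halt_poll_ns);
	return 0;
}

In the patch itself this role is played by kvm_vcpu_max_halt_poll_ns(), which wraps READ_ONCE(vcpu->kvm->max_halt_poll_ns) and is called once at the top of kvm_vcpu_halt() and once more after the halt, where re-reading is explicitly wanted.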
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c | 21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 2a7285c14013..032d2fa301f5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3480,6 +3480,11 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
 	}
 }
 
+static unsigned int kvm_vcpu_max_halt_poll_ns(struct kvm_vcpu *vcpu)
+{
+	return READ_ONCE(vcpu->kvm->max_halt_poll_ns);
+}
+
 /*
  * Emulate a vCPU halt condition, e.g. HLT on x86, WFI on arm, etc... If halt
  * polling is enabled, busy wait for a short time before blocking to avoid the
@@ -3488,14 +3493,15 @@ static inline void update_halt_poll_stats(struct kvm_vcpu *vcpu, ktime_t start,
  */
 void kvm_vcpu_halt(struct kvm_vcpu *vcpu)
 {
+	unsigned int max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
 	bool halt_poll_allowed = !kvm_arch_no_poll(vcpu);
 	ktime_t start, cur, poll_end;
 	bool waited = false;
 	bool do_halt_poll;
 	u64 halt_ns;
 
-	if (vcpu->halt_poll_ns > vcpu->kvm->max_halt_poll_ns)
-		vcpu->halt_poll_ns = vcpu->kvm->max_halt_poll_ns;
+	if (vcpu->halt_poll_ns > max_halt_poll_ns)
+		vcpu->halt_poll_ns = max_halt_poll_ns;
 
 	do_halt_poll = halt_poll_allowed && vcpu->halt_poll_ns;
 
@@ -3537,18 +3543,21 @@ out:
 	update_halt_poll_stats(vcpu, start, poll_end, !waited);
 
 	if (halt_poll_allowed) {
+		/* Recompute the max halt poll time in case it changed. */
+		max_halt_poll_ns = kvm_vcpu_max_halt_poll_ns(vcpu);
+
 		if (!vcpu_valid_wakeup(vcpu)) {
 			shrink_halt_poll_ns(vcpu);
-		} else if (vcpu->kvm->max_halt_poll_ns) {
+		} else if (max_halt_poll_ns) {
 			if (halt_ns <= vcpu->halt_poll_ns)
 				;
 			/* we had a long block, shrink polling */
 			else if (vcpu->halt_poll_ns &&
-				 halt_ns > vcpu->kvm->max_halt_poll_ns)
+				 halt_ns > max_halt_poll_ns)
 				shrink_halt_poll_ns(vcpu);
 			/* we had a short halt and our poll time is too small */
-			else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns &&
-				 halt_ns < vcpu->kvm->max_halt_poll_ns)
+			else if (vcpu->halt_poll_ns < max_halt_poll_ns &&
+				 halt_ns < max_halt_poll_ns)
 				grow_halt_poll_ns(vcpu);
 		} else {
 			vcpu->halt_poll_ns = 0;
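For context, the value being snapshotted is the per-VM limit that userspace can change at any time via KVM_CAP_HALT_POLL, a VM-scoped capability enabled with KVM_ENABLE_CAP where args[0] is the maximum halt-polling time in nanoseconds. A minimal userspace sketch of such an update (the helper name and vm_fd are illustrative; error handling is omitted):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative helper; "vm_fd" is assumed to be an already-open VM fd. */
static int set_max_halt_poll_ns(int vm_fd, unsigned long long max_ns)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_HALT_POLL;
	cap.args[0] = max_ns;	/* new per-VM maximum halt-polling time, in ns */

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);	/* 0 on success, -1 on error */
}

Because such an update can race with a vCPU that is already inside kvm_vcpu_halt(), the halt path snapshots the limit rather than reading kvm->max_halt_poll_ns repeatedly.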