path: root/arch/x86/kvm/hyperv.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2022-10-09 18:39:55 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2022-10-09 18:39:55 +0200
commit ef688f8b8cd3eb20547a6543f03e3d8952b87769 (patch)
tree   d44f43e19e69bc7828a16a259dd7f42afca02e99 /arch/x86/kvm/hyperv.c
parent Merge tag 'efi-next-for-v6.1' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff)
parent KVM: remove KVM_REQ_UNHALT (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm updates from Paolo Bonzini:
 "The first batch of KVM patches, mostly covering x86.

  ARM:

   - Account stage2 page table allocations in memory stats

  x86:

   - Account EPT/NPT arm64 page table allocations in memory stats

   - Tracepoint cleanups/fixes for nested VM-Enter and emulated MSR
     accesses

   - Drop eVMCS controls filtering for KVM on Hyper-V, all known
     versions of Hyper-V now support eVMCS fields associated with
     features that are enumerated to the guest

   - Use KVM's sanitized VMCS config as the basis for the values of
     nested VMX capabilities MSRs

   - A myriad event/exception fixes and cleanups. Most notably, pending
     exceptions morph into VM-Exits earlier, as soon as the exception is
     queued, instead of waiting until the next vmentry. This fixed a
     longstanding issue where the exceptions would incorrectly become
     double-faults instead of triggering a vmexit; the common case of
     page-fault vmexits had a special workaround, but now it's fixed
     for good

   - A handful of fixes for memory leaks in error paths

   - Cleanups for VMREAD trampoline and VMX's VM-Exit assembly flow

   - Never write to memory from non-sleepable kvm_vcpu_check_block()

   - Selftests refinements and cleanups

   - Misc typo cleanups

  Generic:

   - remove KVM_REQ_UNHALT"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (94 commits)
  KVM: remove KVM_REQ_UNHALT
  KVM: mips, x86: do not rely on KVM_REQ_UNHALT
  KVM: x86: never write to memory from kvm_vcpu_check_block()
  KVM: x86: Don't snapshot pending INIT/SIPI prior to checking nested events
  KVM: nVMX: Make event request on VMXOFF iff INIT/SIPI is pending
  KVM: nVMX: Make an event request if INIT or SIPI is pending on VM-Enter
  KVM: SVM: Make an event request if INIT or SIPI is pending when GIF is set
  KVM: x86: lapic does not have to process INIT if it is blocked
  KVM: x86: Rename kvm_apic_has_events() to make it INIT/SIPI specific
  KVM: x86: Rename and expose helper to detect if INIT/SIPI are allowed
  KVM: nVMX: Make an event request when pending an MTF nested VM-Exit
  KVM: x86: make vendor code check for all nested events
  mailmap: Update Oliver's email address
  KVM: x86: Allow force_emulation_prefix to be written without a reload
  KVM: selftests: Add an x86-only test to verify nested exception queueing
  KVM: selftests: Use uapi header to get VMX and SVM exit reasons/codes
  KVM: x86: Rename inject_pending_events() to kvm_check_and_inject_events()
  KVM: VMX: Update MTF and ICEBP comments to document KVM's subtle behavior
  KVM: x86: Treat pending TRIPLE_FAULT requests as pending exceptions
  KVM: x86: Morph pending exceptions to pending VM-Exits at queue time
  ...
Diffstat (limited to 'arch/x86/kvm/hyperv.c')
-rw-r--r--  arch/x86/kvm/hyperv.c | 70
1 file changed, 32 insertions(+), 38 deletions(-)
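Several of the pull-request items above (accounting stage2 and EPT/NPT page
table allocations in memory stats) hinge on charging allocations to the
caller's memory cgroup. A minimal kernel-style sketch of that idiom, using
the same GFP_KERNEL_ACCOUNT flag the hyperv.c hunk below passes to kzalloc();
struct foo_state and foo_alloc() are made-up names, not from this series:

/*
 * Kernel-style sketch, not from this series: GFP_KERNEL_ACCOUNT adds
 * __GFP_ACCOUNT, so the allocation is charged to the caller's memory
 * cgroup.  The hyperv.c hunk below allocates struct kvm_vcpu_hv with
 * exactly this flag.
 */
#include <linux/slab.h>

struct foo_state {
        int n;
};

static struct foo_state *foo_alloc(void)
{
        return kzalloc(sizeof(struct foo_state), GFP_KERNEL_ACCOUNT);
}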
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index ed804447589c..0adf4a437e85 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -38,9 +38,6 @@
#include "irq.h"
#include "fpu.h"
-/* "Hv#1" signature */
-#define HYPERV_CPUID_SIGNATURE_EAX 0x31237648
-
#define KVM_HV_MAX_SPARSE_VCPU_SET_BITS DIV_ROUND_UP(KVM_MAX_VCPUS, 64)
static void stimer_mark_pending(struct kvm_vcpu_hv_stimer *stimer,
@@ -934,11 +931,14 @@ static void stimer_init(struct kvm_vcpu_hv_stimer *stimer, int timer_index)
stimer_prepare_msg(stimer);
}
-static int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
+int kvm_hv_vcpu_init(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu_hv *hv_vcpu;
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
int i;
+ if (hv_vcpu)
+ return 0;
+
hv_vcpu = kzalloc(sizeof(struct kvm_vcpu_hv), GFP_KERNEL_ACCOUNT);
if (!hv_vcpu)
return -ENOMEM;
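The hunk above makes kvm_hv_vcpu_init() idempotent: it now returns 0 early
when the per-vCPU Hyper-V context already exists, so it is safe to call from
any path. A minimal userspace sketch of the same lazy-init pattern; the
names hv_ctx, ctx, and hv_ctx_init() are illustrative, not KVM's:

#include <errno.h>
#include <stdlib.h>

struct hv_ctx {
        int synic_active;
};

static struct hv_ctx *ctx;      /* stands in for vcpu->arch.hyperv */

static int hv_ctx_init(void)
{
        if (ctx)                /* already allocated: idempotent no-op */
                return 0;

        ctx = calloc(1, sizeof(*ctx));
        if (!ctx)
                return -ENOMEM;

        return 0;
}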
@@ -962,11 +962,9 @@ int kvm_hv_activate_synic(struct kvm_vcpu *vcpu, bool dont_zero_synic_pages)
struct kvm_vcpu_hv_synic *synic;
int r;
- if (!to_hv_vcpu(vcpu)) {
- r = kvm_hv_vcpu_init(vcpu);
- if (r)
- return r;
- }
+ r = kvm_hv_vcpu_init(vcpu);
+ if (r)
+ return r;
synic = to_hv_synic(vcpu);
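With init idempotent, this call site (and the two kvm_hv_{set,get}_msr_common()
hunks below, which make the same change) collapses from "check, then maybe
init" to one unconditional call. Continuing the sketch above:

static int hv_activate_synic(void)
{
        int r = hv_ctx_init();  /* safe even if already initialized */

        if (r)
                return r;

        ctx->synic_active = 1;
        return 0;
}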
@@ -1660,10 +1658,8 @@ int kvm_hv_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
if (!host && !vcpu->arch.hyperv_enabled)
return 1;
- if (!to_hv_vcpu(vcpu)) {
- if (kvm_hv_vcpu_init(vcpu))
- return 1;
- }
+ if (kvm_hv_vcpu_init(vcpu))
+ return 1;
if (kvm_hv_msr_partition_wide(msr)) {
int r;
@@ -1683,10 +1679,8 @@ int kvm_hv_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
if (!host && !vcpu->arch.hyperv_enabled)
return 1;
- if (!to_hv_vcpu(vcpu)) {
- if (kvm_hv_vcpu_init(vcpu))
- return 1;
- }
+ if (kvm_hv_vcpu_init(vcpu))
+ return 1;
if (kvm_hv_msr_partition_wide(msr)) {
int r;
@@ -1987,49 +1981,49 @@ ret_success:
return HV_STATUS_SUCCESS;
}
-void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu)
+void kvm_hv_set_cpuid(struct kvm_vcpu *vcpu, bool hyperv_enabled)
{
+ struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
struct kvm_cpuid_entry2 *entry;
- struct kvm_vcpu_hv *hv_vcpu;
- entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_INTERFACE);
- if (entry && entry->eax == HYPERV_CPUID_SIGNATURE_EAX) {
- vcpu->arch.hyperv_enabled = true;
- } else {
- vcpu->arch.hyperv_enabled = false;
+ vcpu->arch.hyperv_enabled = hyperv_enabled;
+
+ if (!hv_vcpu) {
+ /*
+ * KVM should have already allocated kvm_vcpu_hv if Hyper-V is
+ * enabled in CPUID.
+ */
+ WARN_ON_ONCE(vcpu->arch.hyperv_enabled);
return;
}
- if (!to_hv_vcpu(vcpu) && kvm_hv_vcpu_init(vcpu))
- return;
+ memset(&hv_vcpu->cpuid_cache, 0, sizeof(hv_vcpu->cpuid_cache));
- hv_vcpu = to_hv_vcpu(vcpu);
+ if (!vcpu->arch.hyperv_enabled)
+ return;
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
if (entry) {
hv_vcpu->cpuid_cache.features_eax = entry->eax;
hv_vcpu->cpuid_cache.features_ebx = entry->ebx;
hv_vcpu->cpuid_cache.features_edx = entry->edx;
- } else {
- hv_vcpu->cpuid_cache.features_eax = 0;
- hv_vcpu->cpuid_cache.features_ebx = 0;
- hv_vcpu->cpuid_cache.features_edx = 0;
}
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
if (entry) {
hv_vcpu->cpuid_cache.enlightenments_eax = entry->eax;
hv_vcpu->cpuid_cache.enlightenments_ebx = entry->ebx;
- } else {
- hv_vcpu->cpuid_cache.enlightenments_eax = 0;
- hv_vcpu->cpuid_cache.enlightenments_ebx = 0;
}
entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
if (entry)
hv_vcpu->cpuid_cache.syndbg_cap_eax = entry->eax;
- else
- hv_vcpu->cpuid_cache.syndbg_cap_eax = 0;
+
+ entry = kvm_find_cpuid_entry(vcpu, HYPERV_CPUID_NESTED_FEATURES);
+ if (entry) {
+ hv_vcpu->cpuid_cache.nested_eax = entry->eax;
+ hv_vcpu->cpuid_cache.nested_ebx = entry->ebx;
+ }
}
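The rewritten kvm_hv_set_cpuid() zeroes the whole cpuid_cache once with
memset() and then fills in only the leaves that are actually present, which
is what lets every per-field "else" branch disappear. A standalone sketch of
that shape, assuming hypothetical find_leaf()/refresh_cache() helpers in
place of kvm_find_cpuid_entry() (the leaf numbers 0x40000003 and 0x4000000a
correspond to Hyper-V's HYPERV_CPUID_FEATURES and HYPERV_CPUID_NESTED_FEATURES):

#include <string.h>

struct cpuid_cache {
        unsigned int features_eax;
        unsigned int nested_eax;
};

/* Stand-in for kvm_find_cpuid_entry(): returns NULL when the guest's
 * CPUID table lacks the requested leaf. */
static const unsigned int *find_leaf(unsigned int fn)
{
        static const unsigned int features[4] = { 0x1f, 0, 0, 0 };

        return fn == 0x40000003 ? features : NULL;
}

static void refresh_cache(struct cpuid_cache *c)
{
        const unsigned int *leaf;

        /* Zero everything up front; absent leaves then need no "else". */
        memset(c, 0, sizeof(*c));

        leaf = find_leaf(0x40000003);   /* HYPERV_CPUID_FEATURES */
        if (leaf)
                c->features_eax = leaf[0];

        leaf = find_leaf(0x4000000a);   /* HYPERV_CPUID_NESTED_FEATURES */
        if (leaf)
                c->nested_eax = leaf[0];
}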
int kvm_hv_set_enforce_cpuid(struct kvm_vcpu *vcpu, bool enforce)
@@ -2552,7 +2546,7 @@ int kvm_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
case HYPERV_CPUID_NESTED_FEATURES:
ent->eax = evmcs_ver;
ent->eax |= HV_X64_NESTED_MSR_BITMAP;
-
+ ent->ebx |= HV_X64_NESTED_EVMCS1_PERF_GLOBAL_CTRL;
break;
case HYPERV_CPUID_SYNDBG_VENDOR_AND_MAX_FUNCTIONS: