author     Sean Christopherson <seanjc@google.com>    2021-04-22 04:38:31 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>        2021-04-26 11:27:10 +0200
commit     dbdd096a5a74b94f6b786a47baef2085859b0dce
tree       4468a4de6fb69bd9d7d5fcb1adbfd57a8764802a
parent     KVM: SVM: Delay restoration of host MSR_TSC_AUX until return to userspace
KVM: VMX: Intercept FS/GS_BASE MSR accesses for 32-bit KVM
Disable pass-through of the FS and GS base MSRs for 32-bit KVM. Intel's
SDM unequivocally states that the MSRs exist if and only if the CPU
supports x86-64. FS_BASE and GS_BASE are mostly a non-issue; a clever
guest could opportunistically use the MSRs without ill effect.
KERNEL_GS_BASE is a bigger problem: a clever guest would be subtly broken
if it were migrated, because KVM disallows software access to the MSRs
and, unlike the direct variants, KERNEL_GS_BASE must be migrated
explicitly since it is not captured in the VMCS.
Fixes: 25c5f225beda ("KVM: VMX: Enable MSR Bitmap feature")
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20210422023831.3473491-1-seanjc@google.com>
[*NOT* for stable kernels. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
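
A note on the migration hazard described above: the emulated MSR path that
save/restore (KVM_GET_MSRS/KVM_SET_MSRS) relies on only recognizes the
64-bit base MSRs when the hypervisor itself is built for x86-64. The
following is a minimal, self-contained sketch of that shape, not KVM's
actual code; the struct and function names are illustrative only.

/*
 * Minimal sketch (NOT KVM's actual code) of the shape of the problem:
 * the emulated MSR path that migration relies on only knows about the
 * 64-bit base MSRs when the hypervisor is built for x86-64, so any value
 * a 32-bit-KVM guest wrote through a disabled intercept is never saved.
 */
#include <stdbool.h>
#include <stdint.h>

#define MSR_FS_BASE		0xc0000100
#define MSR_GS_BASE		0xc0000101
#define MSR_KERNEL_GS_BASE	0xc0000102

struct vcpu_msrs {			/* hypothetical vCPU state */
	uint64_t fs_base;
	uint64_t gs_base;
	uint64_t kernel_gs_base;
};

/* Emulated RDMSR path, as used by save/restore (e.g. live migration). */
static bool emulated_get_msr(const struct vcpu_msrs *v, uint32_t msr,
			     uint64_t *data)
{
	switch (msr) {
#ifdef CONFIG_X86_64
	case MSR_FS_BASE:
		*data = v->fs_base;
		return true;
	case MSR_GS_BASE:
		*data = v->gs_base;
		return true;
	case MSR_KERNEL_GS_BASE:
		*data = v->kernel_gs_base;
		return true;
#endif
	default:
		/* On a 32-bit build these MSRs "don't exist" for software. */
		return false;
	}
}

With the MSRs passed through on a 32-bit build, a guest could stash a value
in KERNEL_GS_BASE via WRMSR, but an emulated path shaped like the one above
reports the MSR as nonexistent, so migration would silently drop the value.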
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 2 ++
-rw-r--r--  arch/x86/kvm/vmx/vmx.c    | 4 ++++
2 files changed, 6 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 8b111682fe5c..0f8c118ebc35 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -614,6 +614,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 	}
 
 	/* KVM unconditionally exposes the FS/GS base MSRs to L1. */
+#ifdef CONFIG_X86_64
 	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
 					     MSR_FS_BASE, MSR_TYPE_RW);
 
@@ -622,6 +623,7 @@ static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
 
 	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
 					     MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif
 
 	/*
 	 * Checking the L0->L1 bitmap is trying to verify two things:
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 026fe770a5cd..edc23c77be32 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -157,9 +157,11 @@ static u32 vmx_possible_passthrough_msrs[MAX_POSSIBLE_PASSTHROUGH_MSRS] = {
 	MSR_IA32_SPEC_CTRL,
 	MSR_IA32_PRED_CMD,
 	MSR_IA32_TSC,
+#ifdef CONFIG_X86_64
 	MSR_FS_BASE,
 	MSR_GS_BASE,
 	MSR_KERNEL_GS_BASE,
+#endif
 	MSR_IA32_SYSENTER_CS,
 	MSR_IA32_SYSENTER_ESP,
 	MSR_IA32_SYSENTER_EIP,
@@ -6956,9 +6958,11 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 	bitmap_fill(vmx->shadow_msr_intercept.write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
 
 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_TSC, MSR_TYPE_R);
+#ifdef CONFIG_X86_64
 	vmx_disable_intercept_for_msr(vcpu, MSR_FS_BASE, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(vcpu, MSR_GS_BASE, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(vcpu, MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
+#endif
 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_CS, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_ESP, MSR_TYPE_RW);
 	vmx_disable_intercept_for_msr(vcpu, MSR_IA32_SYSENTER_EIP, MSR_TYPE_RW);
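
For context on what the vmx_disable_intercept_for_msr() and
nested_vmx_disable_intercept_for_msr() calls above manipulate: each vCPU has
a 4 KiB VMX MSR bitmap (per the Intel SDM) in which a set bit forces a
VM-exit on the corresponding RDMSR/WRMSR and a clear bit lets the access
through to hardware. A rough, self-contained sketch of that layout follows;
the helper names and the 'passthrough' flag are illustrative, not KVM's.

/*
 * Rough sketch of the VMX MSR-bitmap layout described in the Intel SDM.
 * This is NOT KVM's code; the helper names are illustrative only.  A set
 * bit makes the corresponding RDMSR/WRMSR cause a VM-exit; a clear bit
 * lets the guest access go straight through.
 */
#include <stdbool.h>
#include <stdint.h>

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	(MSR_TYPE_R | MSR_TYPE_W)

static void assign_bit_in(uint8_t *chunk, uint32_t slot, bool set)
{
	if (set)
		chunk[slot / 8] |= (uint8_t)(1u << (slot % 8));
	else
		chunk[slot / 8] &= (uint8_t)~(1u << (slot % 8));
}

/* Returns false for MSRs outside the two architecturally covered ranges. */
static bool msr_bitmap_set_passthrough(uint8_t bitmap[4096], uint32_t msr,
				       int type, bool passthrough)
{
	uint8_t *read_chunk, *write_chunk;
	uint32_t slot;

	if (msr <= 0x1fff) {				/* low MSRs  */
		slot = msr;
		read_chunk  = bitmap;			/* bytes    0..1023 */
		write_chunk = bitmap + 2048;		/* bytes 2048..3071 */
	} else if (msr >= 0xc0000000 && msr <= 0xc0001fff) {	/* high MSRs */
		slot = msr - 0xc0000000;
		read_chunk  = bitmap + 1024;		/* bytes 1024..2047 */
		write_chunk = bitmap + 3072;		/* bytes 3072..4095 */
	} else {
		return false;	/* accesses outside these ranges always exit */
	}

	if (type & MSR_TYPE_R)
		assign_bit_in(read_chunk, slot, !passthrough);
	if (type & MSR_TYPE_W)
		assign_bit_in(write_chunk, slot, !passthrough);
	return true;
}

KVM starts with every bit set (intercept everything) and selectively clears
bits only for MSRs it is willing to pass through, so "intercepting"
FS/GS_BASE on 32-bit KVM simply means no longer clearing their bits when
CONFIG_X86_64 is not defined, which is exactly what the #ifdef hunks above do.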