diff options
author | Sean Christopherson <sean.j.christopherson@intel.com> | 2019-12-10 23:44:16 +0100 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2020-01-21 13:58:16 +0100 |
commit | 345599f9a292899bf5474651f3cea9b7a0576436 (patch) | |
tree | 9da0cec97792658b18d01eae6b38ed5d99a1d80f | |
parent | KVM: x86: Drop special XSAVE handling from guest_cpuid_has() (diff) | |
download | linux-345599f9a292899bf5474651f3cea9b7a0576436.tar.xz linux-345599f9a292899bf5474651f3cea9b7a0576436.zip |
KVM: x86: Add macro to ensure reserved cr4 bits checks stay in sync
Add a helper macro to generate the set of reserved cr4 bits for both
host and guest to ensure that adding a check on guest capabilities is
also added for host capabilities, and vice versa.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r-- | arch/x86/kvm/x86.c | 65 |
1 files changed, 25 insertions, 40 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 960b886e1e43..456fc131c95e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -881,31 +881,34 @@ int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) } EXPORT_SYMBOL_GPL(kvm_set_xcr); +#define __cr4_reserved_bits(__cpu_has, __c) \ +({ \ + u64 __reserved_bits = CR4_RESERVED_BITS; \ + \ + if (!__cpu_has(__c, X86_FEATURE_XSAVE)) \ + __reserved_bits |= X86_CR4_OSXSAVE; \ + if (!__cpu_has(__c, X86_FEATURE_SMEP)) \ + __reserved_bits |= X86_CR4_SMEP; \ + if (!__cpu_has(__c, X86_FEATURE_SMAP)) \ + __reserved_bits |= X86_CR4_SMAP; \ + if (!__cpu_has(__c, X86_FEATURE_FSGSBASE)) \ + __reserved_bits |= X86_CR4_FSGSBASE; \ + if (!__cpu_has(__c, X86_FEATURE_PKU)) \ + __reserved_bits |= X86_CR4_PKE; \ + if (!__cpu_has(__c, X86_FEATURE_LA57)) \ + __reserved_bits |= X86_CR4_LA57; \ + __reserved_bits; \ +}) + static u64 kvm_host_cr4_reserved_bits(struct cpuinfo_x86 *c) { - u64 reserved_bits = CR4_RESERVED_BITS; - - if (!cpu_has(c, X86_FEATURE_XSAVE)) - reserved_bits |= X86_CR4_OSXSAVE; - - if (!cpu_has(c, X86_FEATURE_SMEP)) - reserved_bits |= X86_CR4_SMEP; - - if (!cpu_has(c, X86_FEATURE_SMAP)) - reserved_bits |= X86_CR4_SMAP; - - if (!cpu_has(c, X86_FEATURE_FSGSBASE)) - reserved_bits |= X86_CR4_FSGSBASE; + u64 reserved_bits = __cr4_reserved_bits(cpu_has, c); - if (!cpu_has(c, X86_FEATURE_PKU)) - reserved_bits |= X86_CR4_PKE; + if (cpuid_ecx(0x7) & bit(X86_FEATURE_LA57)) + reserved_bits &= ~X86_CR4_LA57; - if (!cpu_has(c, X86_FEATURE_LA57) && - !(cpuid_ecx(0x7) & bit(X86_FEATURE_LA57))) - reserved_bits |= X86_CR4_LA57; - - if (!cpu_has(c, X86_FEATURE_UMIP) && !kvm_x86_ops->umip_emulated()) - reserved_bits |= X86_CR4_UMIP; + if (kvm_x86_ops->umip_emulated()) + reserved_bits &= ~X86_CR4_UMIP; return reserved_bits; } @@ -915,25 +918,7 @@ static int kvm_valid_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (cr4 & cr4_reserved_bits) return -EINVAL; - if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) && (cr4 & X86_CR4_OSXSAVE)) - return -EINVAL; - - if (!guest_cpuid_has(vcpu, X86_FEATURE_SMEP) && (cr4 & X86_CR4_SMEP)) - return -EINVAL; - - if (!guest_cpuid_has(vcpu, X86_FEATURE_SMAP) && (cr4 & X86_CR4_SMAP)) - return -EINVAL; - - if (!guest_cpuid_has(vcpu, X86_FEATURE_FSGSBASE) && (cr4 & X86_CR4_FSGSBASE)) - return -EINVAL; - - if (!guest_cpuid_has(vcpu, X86_FEATURE_PKU) && (cr4 & X86_CR4_PKE)) - return -EINVAL; - - if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57)) - return -EINVAL; - - if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP)) + if (cr4 & __cr4_reserved_bits(guest_cpuid_has, vcpu)) return -EINVAL; return 0; |