Diffstat (limited to 'arch/x86/kvm/cpuid.h')
-rw-r--r-- | arch/x86/kvm/cpuid.h | 151
1 file changed, 131 insertions(+), 20 deletions(-)
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index 7366c618aa04..63a70f6a3df3 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -6,8 +6,10 @@
 #include <asm/cpu.h>
 #include <asm/processor.h>
 
+extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
+void kvm_set_cpu_caps(void);
+
 int kvm_update_cpuid(struct kvm_vcpu *vcpu);
-bool kvm_mpx_supported(void);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -23,7 +25,7 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 			      struct kvm_cpuid2 *cpuid,
 			      struct kvm_cpuid_entry2 __user *entries);
 bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
-	       u32 *ecx, u32 *edx, bool check_limit);
+	       u32 *ecx, u32 *edx, bool exact_only);
 
 int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);
 
@@ -64,7 +66,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
  * and can't be used by KVM to query/control guest capabilities.  And obviously
  * the leaf being queried must have an entry in the lookup table.
  */
-static __always_inline void reverse_cpuid_check(unsigned x86_leaf)
+static __always_inline void reverse_cpuid_check(unsigned int x86_leaf)
 {
 	BUILD_BUG_ON(x86_leaf == CPUID_LNX_1);
 	BUILD_BUG_ON(x86_leaf == CPUID_LNX_2);
@@ -88,24 +90,18 @@ static __always_inline u32 __feature_bit(int x86_feature)
 
 #define feature_bit(name)  __feature_bit(X86_FEATURE_##name)
 
-static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
+static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned int x86_feature)
 {
-	unsigned x86_leaf = x86_feature / 32;
+	unsigned int x86_leaf = x86_feature / 32;
 
 	reverse_cpuid_check(x86_leaf);
 	return reverse_cpuid[x86_leaf];
 }
 
-static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline u32 *__cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+						  u32 reg)
 {
-	struct kvm_cpuid_entry2 *entry;
-	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
-
-	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
-	if (!entry)
-		return NULL;
-
-	switch (cpuid.reg) {
+	switch (reg) {
 	case CPUID_EAX:
 		return &entry->eax;
 	case CPUID_EBX:
@@ -120,9 +116,86 @@ static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsi
 	}
 }
 
-static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline u32 *cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry,
+						unsigned int x86_feature)
+{
+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+
+	return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline u32 cpuid_entry_get(struct kvm_cpuid_entry2 *entry,
+					   unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	return *reg & __feature_bit(x86_feature);
+}
+
+static __always_inline bool cpuid_entry_has(struct kvm_cpuid_entry2 *entry,
+					    unsigned int x86_feature)
+{
+	return cpuid_entry_get(entry, x86_feature);
+}
+
+static __always_inline void cpuid_entry_clear(struct kvm_cpuid_entry2 *entry,
+					      unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	*reg &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_set(struct kvm_cpuid_entry2 *entry,
+					    unsigned int x86_feature)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	*reg |= __feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
+					       unsigned int x86_feature,
+					       bool set)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, x86_feature);
+
+	/*
+	 * Open coded instead of using cpuid_entry_{clear,set}() to coerce the
+	 * compiler into using CMOV instead of Jcc when possible.
+	 */
+	if (set)
+		*reg |= __feature_bit(x86_feature);
+	else
+		*reg &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
+						 enum cpuid_leafs leaf)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
+
+	BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
+	*reg = kvm_cpu_caps[leaf];
+}
+
+static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
+						     unsigned int x86_feature)
 {
-	int *reg;
+	const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);
+	struct kvm_cpuid_entry2 *entry;
+
+	entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
+	if (!entry)
+		return NULL;
+
+	return __cpuid_entry_get_reg(entry, cpuid.reg);
+}
+
+static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu,
+					    unsigned int x86_feature)
+{
+	u32 *reg;
 
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (!reg)
@@ -131,21 +204,24 @@ static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_
 	return *reg & __feature_bit(x86_feature);
 }
 
-static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
+static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu,
+					      unsigned int x86_feature)
 {
-	int *reg;
+	u32 *reg;
 
 	reg = guest_cpuid_get_register(vcpu, x86_feature);
 	if (reg)
 		*reg &= ~__feature_bit(x86_feature);
 }
 
-static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
+static inline bool guest_cpuid_is_amd_or_hygon(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
 
 	best = kvm_find_cpuid_entry(vcpu, 0, 0);
-	return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
+	return best &&
+	       (is_guest_vendor_amd(best->ebx, best->ecx, best->edx) ||
+		is_guest_vendor_hygon(best->ebx, best->ecx, best->edx));
 }
 
 static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
@@ -192,4 +268,39 @@ static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
 		  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
 }
 
+static __always_inline void kvm_cpu_cap_clear(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	kvm_cpu_caps[x86_leaf] &= ~__feature_bit(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_set(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	kvm_cpu_caps[x86_leaf] |= __feature_bit(x86_feature);
+}
+
+static __always_inline u32 kvm_cpu_cap_get(unsigned int x86_feature)
+{
+	unsigned int x86_leaf = x86_feature / 32;
+
+	reverse_cpuid_check(x86_leaf);
+	return kvm_cpu_caps[x86_leaf] & __feature_bit(x86_feature);
+}
+
+static __always_inline bool kvm_cpu_cap_has(unsigned int x86_feature)
+{
+	return !!kvm_cpu_cap_get(x86_feature);
+}
+
+static __always_inline void kvm_cpu_cap_check_and_set(unsigned int x86_feature)
+{
+	if (boot_cpu_has(x86_feature))
+		kvm_cpu_cap_set(x86_feature);
+}
+
 #endif
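
The kvm_cpu_cap_* helpers added at the end of the file operate on the KVM-wide kvm_cpu_caps mask rather than on per-vCPU CPUID state. A minimal sketch of how vendor code might compose them after kvm_set_cpu_caps() has seeded the mask from the host; the function name and the specific features chosen here are illustrative assumptions, not part of this patch:

/* Hypothetical vendor-side setup, for illustration only. */
static void example_set_vendor_cpu_caps(void)
{
	/* Advertise a feature only if the host CPU actually has it. */
	kvm_cpu_cap_check_and_set(X86_FEATURE_RDTSCP);

	/* A feature KVM fully emulates can be exposed unconditionally. */
	kvm_cpu_cap_set(X86_FEATURE_X2APIC);

	/* Hide a feature this configuration cannot virtualize. */
	kvm_cpu_cap_clear(X86_FEATURE_XSAVES);

	/* Runtime queries then go through the precomputed mask. */
	if (kvm_cpu_cap_has(X86_FEATURE_RDTSCP))
		pr_debug("RDTSCP will be advertised to guests\n");
}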
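Similarly, cpuid_entry_override() lets the code that builds KVM_GET_SUPPORTED_CPUID entries copy a whole output register out of kvm_cpu_caps in one shot, then adjust individual bits. A sketch of the intended call pattern when populating leaf 7 (the surrounding function is assumed; CPUID_7_0_EBX is a real enum cpuid_leafs value, and the SPEC_CTRL adjustment is just one plausible runtime tweak):

/* Sketch, assuming an entry for CPUID.7.0 is being populated. */
static void example_fill_leaf_7(struct kvm_cpuid_entry2 *entry)
{
	/* Overwrite EBX wholesale with KVM's precomputed capabilities... */
	cpuid_entry_override(entry, CPUID_7_0_EBX);

	/* ...then tweak individual bits that depend on runtime state. */
	if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
		cpuid_entry_set(entry, X86_FEATURE_SPEC_CTRL);
}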