summaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/smm.c
diff options
context:
space:
mode:
authorMaxim Levitsky <mlevitsk@redhat.com>2022-10-25 14:47:35 +0200
committerPaolo Bonzini <pbonzini@redhat.com>2022-11-09 18:31:23 +0100
commit58c1d206d545464f9051ad080674b719d553215b (patch)
treea6b717847f3d1409d8486ab4650ab6a23ec3bae2 /arch/x86/kvm/smm.c
parentKVM: x86: smm: add structs for KVM's smram layout (diff)
downloadlinux-58c1d206d545464f9051ad080674b719d553215b.tar.xz
linux-58c1d206d545464f9051ad080674b719d553215b.zip
KVM: x86: smm: use smram structs in the common code
Use kvm_smram union instad of raw arrays in the common smm code. Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com> Message-Id: <20221025124741.228045-18-mlevitsk@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/smm.c')
-rw-r--r--arch/x86/kvm/smm.c27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/smm.c b/arch/x86/kvm/smm.c
index 2e6ec79a581e..ba2733e535fd 100644
--- a/arch/x86/kvm/smm.c
+++ b/arch/x86/kvm/smm.c
@@ -295,17 +295,18 @@ void enter_smm(struct kvm_vcpu *vcpu)
struct kvm_segment cs, ds;
struct desc_ptr dt;
unsigned long cr0;
- char buf[512];
+ union kvm_smram smram;
check_smram_offsets();
- memset(buf, 0, 512);
+ memset(smram.bytes, 0, sizeof(smram.bytes));
+
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
- enter_smm_save_state_64(vcpu, buf);
+ enter_smm_save_state_64(vcpu, smram.bytes);
else
#endif
- enter_smm_save_state_32(vcpu, buf);
+ enter_smm_save_state_32(vcpu, smram.bytes);
/*
* Give enter_smm() a chance to make ISA-specific changes to the vCPU
@@ -315,12 +316,12 @@ void enter_smm(struct kvm_vcpu *vcpu)
* Kill the VM in the unlikely case of failure, because the VM
* can be in undefined state in this case.
*/
- if (static_call(kvm_x86_enter_smm)(vcpu, buf))
+ if (static_call(kvm_x86_enter_smm)(vcpu, &smram))
goto error;
kvm_smm_changed(vcpu, true);
- if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, buf, sizeof(buf)))
+ if (kvm_vcpu_write_guest(vcpu, vcpu->arch.smbase + 0xfe00, &smram, sizeof(smram)))
goto error;
if (static_call(kvm_x86_get_nmi_mask)(vcpu))
@@ -480,7 +481,7 @@ static int rsm_enter_protected_mode(struct kvm_vcpu *vcpu,
}
static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
- const char *smstate)
+ u8 *smstate)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
struct kvm_segment desc;
@@ -541,7 +542,7 @@ static int rsm_load_state_32(struct x86_emulate_ctxt *ctxt,
#ifdef CONFIG_X86_64
static int rsm_load_state_64(struct x86_emulate_ctxt *ctxt,
- const char *smstate)
+ u8 *smstate)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
struct kvm_segment desc;
@@ -612,13 +613,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
{
struct kvm_vcpu *vcpu = ctxt->vcpu;
unsigned long cr0;
- char buf[512];
+ union kvm_smram smram;
u64 smbase;
int ret;
smbase = vcpu->arch.smbase;
- ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, buf, sizeof(buf));
+ ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfe00, smram.bytes, sizeof(smram));
if (ret < 0)
return X86EMUL_UNHANDLEABLE;
@@ -675,13 +676,13 @@ int emulator_leave_smm(struct x86_emulate_ctxt *ctxt)
* state (e.g. enter guest mode) before loading state from the SMM
* state-save area.
*/
- if (static_call(kvm_x86_leave_smm)(vcpu, buf))
+ if (static_call(kvm_x86_leave_smm)(vcpu, &smram))
return X86EMUL_UNHANDLEABLE;
#ifdef CONFIG_X86_64
if (guest_cpuid_has(vcpu, X86_FEATURE_LM))
- return rsm_load_state_64(ctxt, buf);
+ return rsm_load_state_64(ctxt, smram.bytes);
else
#endif
- return rsm_load_state_32(ctxt, buf);
+ return rsm_load_state_32(ctxt, smram.bytes);
}