author | Sean Christopherson <sean.j.christopherson@intel.com> | 2019-01-25 16:41:17 +0100 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2019-02-20 22:48:17 +0100 |
commit | e75c3c3a0487da878cbfa7f125dcd080a8606eaf | |
tree | c9115f813adce4028063196664fe1208b410f36e /arch/x86/kvm/vmx/vmenter.S | |
parent | KVM: VMX: Pass @launched to the vCPU-run asm via standard ABI regs | |
KVM: VMX: Return VM-Fail from vCPU-run assembly via standard ABI reg
...to prepare for making the assembly sub-routine callable from C code.
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
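Why RAX: in both the 32-bit and 64-bit x86 calling conventions, EAX/RAX is the return-value register, so once the VM-Fail flag lives there a later patch can give the routine an ordinary C prototype and read the result as a normal return value. A minimal user-space sketch of that property (illustrative only; fake_vcpu_run() is a hypothetical stand-in for __vmx_vcpu_run(), not the kernel's actual code):

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for __vmx_vcpu_run(): the asm body leaves
 * 0 (VM-Exit) or 1 (VM-Fail) in EAX, the standard ABI return
 * register, so C reads it back as an ordinary bool return value
 * with no glue code.
 */
static bool fake_vcpu_run(void)
{
	bool vmfail;

	/* Stand-in for the VMLAUNCH/VMRESUME path: "return" 1 in EAX. */
	asm("mov $1, %%eax" : "=a"(vmfail));
	return vmfail;
}

int main(void)
{
	printf(fake_vcpu_run() ? "VM-Fail\n" : "VM-Exit\n");
	return 0;
}
```

Had the flag stayed in RBX, every C caller would have needed an asm shim to move RBX into the return register; parking it in RAX makes the eventual C declaration free.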
Diffstat (limited to 'arch/x86/kvm/vmx/vmenter.S')
-rw-r--r-- | arch/x86/kvm/vmx/vmenter.S | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index a3d9a8e062f9..e06a3f33311e 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -87,7 +87,7 @@ ENDPROC(vmx_vmexit)
  * @launched: %true if the VMCS has been launched
  *
  * Returns:
- *	%RBX is 0 on VM-Exit, 1 on VM-Fail
+ *	0 on VM-Exit, 1 on VM-Fail
  */
 ENTRY(__vmx_vcpu_run)
 	push %_ASM_BP
@@ -163,17 +163,17 @@ ENTRY(__vmx_vcpu_run)
 	mov %r15, VCPU_R15(%_ASM_AX)
 #endif
 
-	/* Clear EBX to indicate VM-Exit (as opposed to VM-Fail). */
-	xor %ebx, %ebx
+	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
+	xor %eax, %eax
 
 	/*
-	 * Clear all general purpose registers except RSP and RBX to prevent
+	 * Clear all general purpose registers except RSP and RAX to prevent
 	 * speculative use of the guest's values, even those that are reloaded
 	 * via the stack. In theory, an L1 cache miss when restoring registers
 	 * could lead to speculative execution with the guest's values.
 	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
-	 * free. RSP and RBX are exempt as RSP is restored by hardware during
-	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to "return" VM-Fail.
+	 * free. RSP and RAX are exempt as RSP is restored by hardware during
+	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
 	 */
 1:
 #ifdef CONFIG_X86_64
@@ -186,7 +186,7 @@ ENTRY(__vmx_vcpu_run)
 	xor %r14d, %r14d
 	xor %r15d, %r15d
 #endif
-	xor %eax, %eax
+	xor %ebx, %ebx
 	xor %ecx, %ecx
 	xor %edx, %edx
 	xor %esi, %esi
@@ -199,6 +199,6 @@ ENTRY(__vmx_vcpu_run)
 	ret
 
 	/* VM-Fail. Out-of-line to avoid a taken Jcc after VM-Exit. */
-2:	mov $1, %ebx
+2:	mov $1, %eax
 	jmp 1b
 ENDPROC(__vmx_vcpu_run)
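A side note on the register widths in the diff: the zeroing XORs and the VM-Fail `mov $1, %eax` all use 32-bit forms even though the full 64-bit registers must be cleared or set. That is safe because writing a 32-bit register on x86-64 zero-extends into the containing 64-bit register, and the 32-bit encodings are shorter. A stand-alone demo of the zero-extension (illustrative user-space C, not kernel code):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t rax = 0xdeadbeefcafef00dULL;

	/*
	 * 32-bit "mov $1, %eax" does not merely replace the low half:
	 * the upper 32 bits of RAX are cleared as well, which is why
	 * the diff's "mov $1, %eax" suffices to return 1 in RAX.
	 */
	asm("mov $1, %%eax" : "+a"(rax));
	printf("rax = %#llx\n", (unsigned long long)rax);	/* prints 0x1 */
	return 0;
}
```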