author    Sean Christopherson <sean.j.christopherson@intel.com>  2018-12-20 21:25:16 +0100
committer Paolo Bonzini <pbonzini@redhat.com>  2018-12-21 12:02:43 +0100
commit    051a2d3e59e51ae49fd56aef34e472832897ce46
tree      a6c583cfe78711a8d9e579f1e1861a20725c640b /arch/x86/kvm
parent    Merge tag 'kvm-ppc-next-4.21-2' of git://git.kernel.org/pub/scm/linux/kernel/...
KVM: VMX: Explicitly reference RCX as the vmx_vcpu pointer in asm blobs
Use '%%" _ASM_CX "' instead of '%0' to dereference RCX, i.e. the 'struct vcpu_vmx' pointer, in the VM-Enter asm blobs of vmx_vcpu_run() and nested_vmx_check_vmentry_hw(). With the positional '%0', adding or removing an output parameter renumbers every positional reference and thus requires "rewriting" almost all of the asm blob, which makes it nearly impossible to see what actually changed in even the most minor patches. Referencing RCX by its symbolic name avoids the renumbering entirely.

Opportunistically improve the code comments.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
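[ Editor's illustration, not kernel code: the churn comes from GCC's positional asm operands, which are renumbered whenever an operand is added or removed. Pinning a value to a register via a machine constraint ("c" for RCX below) lets the template name the register directly and survive operand-list changes. Function and variable names here are hypothetical. ]

	#include <stddef.h>

	/* Positional: '%1' silently becomes '%2' if another output is added. */
	static inline int read_positional(int *p)
	{
		int ret;

		asm("movl (%1), %0" : "=r"(ret) : "r"(p) : "memory");
		return ret;
	}

	/*
	 * Pinned: the "c" constraint forces 'p' into RCX, so the template can
	 * reference %%rcx by name, mirroring the '%%" _ASM_CX "' usage in this
	 * patch (x86-64 only for simplicity).
	 */
	static inline int read_pinned(int *p)
	{
		int ret;

		asm("movl (%%rcx), %[ret]" : [ret] "=r"(ret) : "c"(p) : "memory");
		return ret;
	}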
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/vmx/nested.c |  6
-rw-r--r--  arch/x86/kvm/vmx/vmx.c    | 86
2 files changed, 50 insertions(+), 42 deletions(-)
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d839864aa8b0..da16f022a529 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2759,17 +2759,17 @@ static int __noclone nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
asm(
/* Set HOST_RSP */
__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
- "mov %%" _ASM_SP ", %c[host_rsp](%0)\n\t"
+ "mov %%" _ASM_SP ", %c[host_rsp](%% " _ASM_CX")\n\t"
/* Check if vmlaunch or vmresume is needed */
- "cmpl $0, %c[launched](%0)\n\t"
+ "cmpl $0, %c[launched](%% " _ASM_CX")\n\t"
"jne 1f\n\t"
__ex("vmlaunch") "\n\t"
"jmp 2f\n\t"
"1: " __ex("vmresume") "\n\t"
"2: "
/* Set vmx->fail accordingly */
- "setbe %c[fail](%0)\n\t"
+ "setbe %c[fail](%% " _ASM_CX")\n\t"
".pushsection .rodata\n\t"
".global vmx_early_consistency_check_return\n\t"
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index d1c818a877df..e82319fbdceb 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6440,9 +6440,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"push %%" _ASM_DX "; push %%" _ASM_BP ";"
"push %%" _ASM_CX " \n\t" /* placeholder for guest rcx */
"push %%" _ASM_CX " \n\t"
- "cmp %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
+ "cmp %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
"je 1f \n\t"
- "mov %%" _ASM_SP ", %c[host_rsp](%0) \n\t"
+ "mov %%" _ASM_SP ", %c[host_rsp](%%" _ASM_CX ") \n\t"
/* Avoid VMWRITE when Enlightened VMCS is in use */
"test %%" _ASM_SI ", %%" _ASM_SI " \n\t"
"jz 2f \n\t"
@@ -6452,32 +6452,33 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
__ex("vmwrite %%" _ASM_SP ", %%" _ASM_DX) "\n\t"
"1: \n\t"
/* Reload cr2 if changed */
- "mov %c[cr2](%0), %%" _ASM_AX " \n\t"
+ "mov %c[cr2](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
"mov %%cr2, %%" _ASM_DX " \n\t"
"cmp %%" _ASM_AX ", %%" _ASM_DX " \n\t"
"je 3f \n\t"
"mov %%" _ASM_AX", %%cr2 \n\t"
"3: \n\t"
/* Check if vmlaunch or vmresume is needed */
- "cmpl $0, %c[launched](%0) \n\t"
+ "cmpl $0, %c[launched](%%" _ASM_CX ") \n\t"
/* Load guest registers. Don't clobber flags. */
- "mov %c[rax](%0), %%" _ASM_AX " \n\t"
- "mov %c[rbx](%0), %%" _ASM_BX " \n\t"
- "mov %c[rdx](%0), %%" _ASM_DX " \n\t"
- "mov %c[rsi](%0), %%" _ASM_SI " \n\t"
- "mov %c[rdi](%0), %%" _ASM_DI " \n\t"
- "mov %c[rbp](%0), %%" _ASM_BP " \n\t"
+ "mov %c[rax](%%" _ASM_CX "), %%" _ASM_AX " \n\t"
+ "mov %c[rbx](%%" _ASM_CX "), %%" _ASM_BX " \n\t"
+ "mov %c[rdx](%%" _ASM_CX "), %%" _ASM_DX " \n\t"
+ "mov %c[rsi](%%" _ASM_CX "), %%" _ASM_SI " \n\t"
+ "mov %c[rdi](%%" _ASM_CX "), %%" _ASM_DI " \n\t"
+ "mov %c[rbp](%%" _ASM_CX "), %%" _ASM_BP " \n\t"
#ifdef CONFIG_X86_64
- "mov %c[r8](%0), %%r8 \n\t"
- "mov %c[r9](%0), %%r9 \n\t"
- "mov %c[r10](%0), %%r10 \n\t"
- "mov %c[r11](%0), %%r11 \n\t"
- "mov %c[r12](%0), %%r12 \n\t"
- "mov %c[r13](%0), %%r13 \n\t"
- "mov %c[r14](%0), %%r14 \n\t"
- "mov %c[r15](%0), %%r15 \n\t"
+ "mov %c[r8](%%" _ASM_CX "), %%r8 \n\t"
+ "mov %c[r9](%%" _ASM_CX "), %%r9 \n\t"
+ "mov %c[r10](%%" _ASM_CX "), %%r10 \n\t"
+ "mov %c[r11](%%" _ASM_CX "), %%r11 \n\t"
+ "mov %c[r12](%%" _ASM_CX "), %%r12 \n\t"
+ "mov %c[r13](%%" _ASM_CX "), %%r13 \n\t"
+ "mov %c[r14](%%" _ASM_CX "), %%r14 \n\t"
+ "mov %c[r15](%%" _ASM_CX "), %%r15 \n\t"
#endif
- "mov %c[rcx](%0), %%" _ASM_CX " \n\t" /* kills %0 (ecx) */
+ /* Load guest RCX. This kills the vmx_vcpu pointer! */
+ "mov %c[rcx](%%" _ASM_CX "), %%" _ASM_CX " \n\t"
/* Enter guest mode */
"jne 1f \n\t"
@@ -6485,26 +6486,33 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"jmp 2f \n\t"
"1: " __ex("vmresume") "\n\t"
"2: "
- /* Save guest registers, load host registers, keep flags */
- "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
- "pop %0 \n\t"
- "setbe %c[fail](%0)\n\t"
- "mov %%" _ASM_AX ", %c[rax](%0) \n\t"
- "mov %%" _ASM_BX ", %c[rbx](%0) \n\t"
- __ASM_SIZE(pop) " %c[rcx](%0) \n\t"
- "mov %%" _ASM_DX ", %c[rdx](%0) \n\t"
- "mov %%" _ASM_SI ", %c[rsi](%0) \n\t"
- "mov %%" _ASM_DI ", %c[rdi](%0) \n\t"
- "mov %%" _ASM_BP ", %c[rbp](%0) \n\t"
+
+ /* Save guest's RCX to the stack placeholder (see above) */
+ "mov %%" _ASM_CX ", %c[wordsize](%%" _ASM_SP ") \n\t"
+
+ /* Load host's RCX, i.e. the vmx_vcpu pointer */
+ "pop %%" _ASM_CX " \n\t"
+
+ /* Set vmx->fail based on EFLAGS.{CF,ZF} */
+ "setbe %c[fail](%%" _ASM_CX ")\n\t"
+
+ /* Save all guest registers, including RCX from the stack */
+ "mov %%" _ASM_AX ", %c[rax](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_BX ", %c[rbx](%%" _ASM_CX ") \n\t"
+ __ASM_SIZE(pop) " %c[rcx](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_DX ", %c[rdx](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_SI ", %c[rsi](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_DI ", %c[rdi](%%" _ASM_CX ") \n\t"
+ "mov %%" _ASM_BP ", %c[rbp](%%" _ASM_CX ") \n\t"
#ifdef CONFIG_X86_64
- "mov %%r8, %c[r8](%0) \n\t"
- "mov %%r9, %c[r9](%0) \n\t"
- "mov %%r10, %c[r10](%0) \n\t"
- "mov %%r11, %c[r11](%0) \n\t"
- "mov %%r12, %c[r12](%0) \n\t"
- "mov %%r13, %c[r13](%0) \n\t"
- "mov %%r14, %c[r14](%0) \n\t"
- "mov %%r15, %c[r15](%0) \n\t"
+ "mov %%r8, %c[r8](%%" _ASM_CX ") \n\t"
+ "mov %%r9, %c[r9](%%" _ASM_CX ") \n\t"
+ "mov %%r10, %c[r10](%%" _ASM_CX ") \n\t"
+ "mov %%r11, %c[r11](%%" _ASM_CX ") \n\t"
+ "mov %%r12, %c[r12](%%" _ASM_CX ") \n\t"
+ "mov %%r13, %c[r13](%%" _ASM_CX ") \n\t"
+ "mov %%r14, %c[r14](%%" _ASM_CX ") \n\t"
+ "mov %%r15, %c[r15](%%" _ASM_CX ") \n\t"
/*
* Clear host registers marked as clobbered to prevent
* speculative use.
@@ -6519,7 +6527,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
"xor %%r15d, %%r15d \n\t"
#endif
"mov %%cr2, %%" _ASM_AX " \n\t"
- "mov %%" _ASM_AX ", %c[cr2](%0) \n\t"
+ "mov %%" _ASM_AX ", %c[cr2](%%" _ASM_CX ") \n\t"
"xor %%eax, %%eax \n\t"
"xor %%ebx, %%ebx \n\t"