author		Jan Beulich <JBeulich@suse.com>		2017-02-03 10:03:25 +0100
committer	Ingo Molnar <mingo@kernel.org>		2017-03-01 10:16:35 +0100
commit		2140a9942b84dd4bf559dd1215b8f43c36ece5b5
tree		bcb70908e5e9a332da019ff33ce2e1aec21f1533 /arch
parent		x86/entry/32: Relax a pvops stub clobber specification
x86/entry/64: Relax pvops stub clobber specifications
Except for the error_exit case, none of the code paths following the
{DIS,EN}ABLE_INTERRUPTS() invocations modified here make any assumptions
about register values, so all registers can be clobbered there. In the
error_exit case, a minor adjustment to register usage (which also
eliminates an instruction) allows the same to hold.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/5894556D02000078001366D3@prv-mh.provo.novell.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
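For context: DISABLE_INTERRUPTS() and ENABLE_INTERRUPTS() expand to patchable
paravirt (pvops) call sites, and the CLBR_* argument declares which registers
the surrounding code permits the stub to clobber; that in turn determines how
many registers the macros must save and restore around the call. A rough
sketch of the difference at one such site, simplified from the
PARA_SITE/PV_SAVE_REGS machinery in arch/x86/include/asm/paravirt_types.h
(illustrative only, not the verbatim expansion):

	/*
	 * DISABLE_INTERRUPTS(CLBR_NONE): the caller waives nothing, so the
	 * macro preserves the caller-saved registers around the pvops call
	 * (sketch; the exact set is computed by PV_SAVE_REGS/COND_PUSH):
	 */
	pushq	%rax
	pushq	%rcx
	pushq	%rdx
	pushq	%rsi
	pushq	%rdi
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11
	call	*pv_irq_ops+PV_IRQ_irq_disable(%rip)
	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rdi
	popq	%rsi
	popq	%rdx
	popq	%rcx
	popq	%rax

	/*
	 * DISABLE_INTERRUPTS(CLBR_ANY): the caller declares it relies on no
	 * register contents, so no save/restore code is emitted at all:
	 */
	call	*pv_irq_ops+PV_IRQ_irq_disable(%rip)

The win from relaxing CLBR_NONE to CLBR_ANY is therefore not in the call
itself but in the push/pop pairs that no longer have to surround it.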
Diffstat (limited to 'arch')
 arch/x86/entry/entry_64.S | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 044d18ebc43c..d2b2a2948ffe 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -212,7 +212,7 @@ entry_SYSCALL_64_fastpath:
 	 * If we see that no exit work is required (which we are required
 	 * to check with IRQs off), then we can go straight to SYSRET64.
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	movq	PER_CPU_VAR(current_task), %r11
 	testl	$_TIF_ALLWORK_MASK, TASK_TI_flags(%r11)
@@ -233,7 +233,7 @@ entry_SYSCALL_64_fastpath:
 	 * raise(3) will trigger this, for example.  IRQs are off.
 	 */
 	TRACE_IRQS_ON
-	ENABLE_INTERRUPTS(CLBR_NONE)
+	ENABLE_INTERRUPTS(CLBR_ANY)
 	SAVE_EXTRA_REGS
 	movq	%rsp, %rdi
 	call	syscall_return_slowpath	/* returns with IRQs disabled */
@@ -343,7 +343,7 @@ ENTRY(stub_ptregs_64)
 	 * Called from fast path -- disable IRQs again, pop return address
 	 * and jump to slow path
 	 */
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	popq	%rax
 	jmp	entry_SYSCALL64_slow_path
@@ -518,7 +518,7 @@ common_interrupt:
 	interrupt do_IRQ
 	/* 0(%rsp): old RSP */
 ret_from_intr:
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
 	decl	PER_CPU_VAR(irq_count)
 
@@ -1051,7 +1051,7 @@ END(paranoid_entry)
  * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
  */
 ENTRY(paranoid_exit)
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF_DEBUG
 	testl	%ebx, %ebx			/* swapgs needed? */
 	jnz	paranoid_exit_no_swapgs
@@ -1156,10 +1156,9 @@ END(error_entry)
  * 0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
  */
 ENTRY(error_exit)
-	movl	%ebx, %eax
-	DISABLE_INTERRUPTS(CLBR_NONE)
+	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
-	testl	%eax, %eax
+	testl	%ebx, %ebx
 	jnz	retint_kernel
 	jmp	retint_user
 END(error_exit)
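The error_exit hunk is the "minor adjustment to register usage" the changelog
mentions. The reasoning, spelled out (an inference from the patch, not stated
in the changelog): error_entry leaves the gsbase flag in %ebx, and the old
code copied it into %eax before DISABLE_INTERRUPTS() because CLBR_NONE
guaranteed %eax would survive the pvops call. %rbx, however, is callee-saved
under the SysV AMD64 ABI, so it survives the call even once the site is
allowed to clobber every caller-saved register; testing %ebx directly is
equivalent and drops the movl. The resulting code, annotated:

	ENTRY(error_exit)
		/* %ebx (from error_entry): 1 = kernel gsbase in place,
		 * 0 = user gsbase loaded, SWAPGS needed on the way out */
		DISABLE_INTERRUPTS(CLBR_ANY)	/* caller-saved regs may be clobbered */
		TRACE_IRQS_OFF
		testl	%ebx, %ebx		/* %rbx is callee-saved: still valid */
		jnz	retint_kernel
		jmp	retint_user
	END(error_exit)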