author		Andy Lutomirski <luto@kernel.org>	2015-07-03 21:44:19 +0200
committer	Ingo Molnar <mingo@kernel.org>		2015-07-07 10:58:30 +0200
commit		5e99cb7c35ca0580da8e892f91c655d35ecf8798
tree		3d8c48af3e328fc6691757dbfa2adec2f596caeb
parent		x86/entry, selftests/x86: Add a test for 32-bit fast syscall arg faults
x86/entry/64/compat: Fix bad fast syscall arg failure path
If user code does SYSCALL32 or SYSENTER without a valid stack, then
our attempt to determine the syscall args will result in a failed
uaccess fault.  Previously, we would try to recover by jumping to the
syscall exit code, but we'd run the syscall exit work even though we
never made it to the syscall entry work.

Clean it up by treating the failure path as a non-syscall entry and
exit pair.

This fixes strace's output when running the syscall_arg_fault test.
Without this fix, strace would get out of sync and would fail to
associate syscall entries with syscall exits.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Denys Vlasenko <vda.linux@googlemail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/903010762c07a3d67df914fea2da84b52b0f8f1d.1435952415.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
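For illustration only (not part of the patch): the failure path is reached when user code issues a 32-bit fast syscall with a garbage stack pointer, so the kernel's fetch of the sixth syscall argument from the user stack faults. The sketch below is loosely modeled on the syscall_arg_fault selftest added by the parent commit; the file name, register setup and signal-handling details are illustrative assumptions, not the actual test. Build it as a 32-bit binary (e.g. gcc -m32 -O2 -o badarg badarg.c) and run it on an x86-64 kernel; with this patch applied, strace should stay in sync while it runs.

/*
 * Illustrative sketch: provoke the ia32_badarg path by issuing a
 * 32-bit fast syscall with an invalid user stack, then recover via
 * a SIGSEGV handler running on an alternate signal stack.
 */
#define _GNU_SOURCE
#include <err.h>
#include <errno.h>
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ucontext.h>

static sigjmp_buf jmpbuf;

static void sigsegv(int sig, siginfo_t *info, void *ctx_void)
{
	ucontext_t *ctx = ctx_void;

	/* With the fix, the aborted syscall should have left -EFAULT in EAX. */
	if (ctx->uc_mcontext.gregs[REG_EAX] == -EFAULT)
		printf("[OK]\tEAX == -EFAULT\n");
	else
		printf("[FAIL]\tEAX == 0x%x\n",
		       (unsigned int)ctx->uc_mcontext.gregs[REG_EAX]);

	siglongjmp(jmpbuf, 1);
}

static void sigill(int sig, siginfo_t *info, void *ctx_void)
{
	/* 32-bit SYSENTER isn't available on every CPU/configuration. */
	printf("[SKIP]\tSYSENTER not supported here\n");
	siglongjmp(jmpbuf, 1);
}

static void sethandler(int sig, void (*handler)(int, siginfo_t *, void *))
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO | SA_ONSTACK;	/* %esp will be trash */
	sigemptyset(&sa.sa_mask);
	if (sigaction(sig, &sa, NULL) != 0)
		err(1, "sigaction");
}

int main(void)
{
	stack_t altstack = {
		.ss_sp = malloc(SIGSTKSZ),
		.ss_size = SIGSTKSZ,
	};

	if (!altstack.ss_sp || sigaltstack(&altstack, NULL) != 0)
		err(1, "sigaltstack");

	sethandler(SIGSEGV, sigsegv);
	sethandler(SIGILL, sigill);

	if (sigsetjmp(jmpbuf, 1) == 0) {
		/*
		 * The 32-bit fast syscall entry fetches arg6 from the user
		 * stack (via %ebp); with %ebp and %esp pointing nowhere,
		 * that uaccess fails and the kernel takes ia32_badarg.
		 * A real caller goes through the vDSO; issuing SYSENTER
		 * directly just keeps the example short.
		 */
		asm volatile ("movl $-1, %%eax\n\t"
			      "movl $-1, %%ebp\n\t"
			      "movl $-1, %%esp\n\t"
			      "sysenter"
			      : : : "memory", "cc");
	}

	printf("survived the bad fast syscall\n");
	return 0;
}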
 arch/x86/entry/entry_64.S        |  2 +-
 arch/x86/entry/entry_64_compat.S | 35 +++++++++++++++++++++++++++++++++--
 2 files changed, 34 insertions(+), 3 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 3bb2c4302df1..141a5d49dddc 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -613,7 +613,7 @@ ret_from_intr:
 	testb	$3, CS(%rsp)
 	jz	retint_kernel
 	/* Interrupt came from user space */
-retint_user:
+GLOBAL(retint_user)
 	GET_THREAD_INFO(%rcx)
 
 	/* %rcx: thread info. Interrupts are off. */
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index b868cfc72985..e5ebdd963a99 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -428,8 +428,39 @@ cstar_tracesys:
 END(entry_SYSCALL_compat)
 
 ia32_badarg:
-	ASM_CLAC
-	movq	$-EFAULT, RAX(%rsp)
+	/*
+	 * So far, we've entered kernel mode, set AC, turned on IRQs, and
+	 * saved C regs except r8-r11. We haven't done any of the other
+	 * standard entry work, though. We want to bail, but we shouldn't
+	 * treat this as a syscall entry since we don't even know what the
+	 * args are. Instead, treat this as a non-syscall entry, finish
+	 * the entry work, and immediately exit after setting AX = -EFAULT.
+	 *
+	 * We're really just being polite here. Killing the task outright
+	 * would be a reasonable action, too. Given that the only valid
+	 * way to have gotten here is through the vDSO, and we already know
+	 * that the stack pointer is bad, the task isn't going to survive
+	 * for long no matter what we do.
+	 */
+
+	ASM_CLAC			/* undo STAC */
+	movq	$-EFAULT, RAX(%rsp)	/* return -EFAULT if possible */
+
+	/* Fill in the rest of pt_regs */
+	xorl	%eax, %eax
+	movq	%rax, R11(%rsp)
+	movq	%rax, R10(%rsp)
+	movq	%rax, R9(%rsp)
+	movq	%rax, R8(%rsp)
+	SAVE_EXTRA_REGS
+
+	/* Turn IRQs back off. */
+	DISABLE_INTERRUPTS(CLBR_NONE)
+	TRACE_IRQS_OFF
+
+	/* And exit again. */
+	jmp	retint_user
+
 ia32_ret_from_sys_call:
 	xorl	%eax, %eax		/* Do not leak kernel information */
 	movq	%rax, R11(%rsp)