Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/entry/common.c   | 12
-rw-r--r--   arch/x86/entry/entry_64.S |  3
2 files changed, 8 insertions, 7 deletions
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 74f6eee15179..a8b066dbbf48 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -266,14 +266,13 @@ __visible inline void syscall_return_slowpath(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_X86_64
-__visible void do_syscall_64(struct pt_regs *regs)
+__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
 {
-        struct thread_info *ti = current_thread_info();
-        unsigned long nr = regs->orig_ax;
+        struct thread_info *ti;
 
         enter_from_user_mode();
         local_irq_enable();
-
+        ti = current_thread_info();
         if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
                 nr = syscall_trace_enter(regs);
 
@@ -282,8 +281,9 @@ __visible void do_syscall_64(struct pt_regs *regs)
          * table. The only functional difference is the x32 bit in
          * regs->orig_ax, which changes the behavior of some syscalls.
          */
-        if (likely((nr & __SYSCALL_MASK) < NR_syscalls)) {
-                nr = array_index_nospec(nr & __SYSCALL_MASK, NR_syscalls);
+        nr &= __SYSCALL_MASK;
+        if (likely(nr < NR_syscalls)) {
+                nr = array_index_nospec(nr, NR_syscalls);
                 regs->ax = sys_call_table[nr](
                         regs->di, regs->si, regs->dx,
                         regs->r10, regs->r8, regs->r9);
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 936e19642eab..6cfe38665f3c 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -233,7 +233,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe)
         TRACE_IRQS_OFF
 
         /* IRQs are off. */
-        movq    %rsp, %rdi
+        movq    %rax, %rdi
+        movq    %rsp, %rsi
         call    do_syscall_64           /* returns with IRQs disabled */
 
         TRACE_IRQS_IRETQ                /* we're about to change IF */
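The common.c hunk keeps the Spectre-v1 hardening pattern intact: mask the syscall number, bounds-check it, then pass the index through array_index_nospec() so a mispredicted check cannot index past the end of sys_call_table under speculation. The entry_64.S hunk simply follows the x86-64 SysV calling convention, in which the first two integer arguments arrive in %rdi and %rsi, so %rax (the syscall number) and %rsp (which points at the pt_regs frame built on entry) are moved into those registers before calling do_syscall_64. Below is a minimal, hypothetical user-space C sketch of the same clamped-dispatch idea; index_nospec(), the handler table, and TABLE_SIZE are illustrative stand-ins, not kernel APIs.

/*
 * Hypothetical user-space sketch (not kernel code) of the dispatch pattern
 * above: mask the incoming number, bounds-check it, then clamp the index
 * branchlessly so speculation cannot read past the end of the table.
 */
#include <stdio.h>

#define TABLE_SIZE 4UL

typedef long (*handler_t)(long);

static long h_add(long a)  { return a + 1; }
static long h_neg(long a)  { return -a; }
static long h_dbl(long a)  { return a * 2; }
static long h_zero(long a) { (void)a; return 0; }

static handler_t table[TABLE_SIZE] = { h_add, h_neg, h_dbl, h_zero };

/*
 * Rough stand-in for the kernel's array_index_nospec(): yields 'index' when
 * index < size and 0 otherwise, without a data-dependent branch.  Assumes an
 * arithmetic right shift of signed values, as on common compilers.
 */
static unsigned long index_nospec(unsigned long index, unsigned long size)
{
        unsigned long mask = (long)(index - size) >> (sizeof(long) * 8 - 1);
        return index & mask;
}

static long dispatch(unsigned long nr, long arg)
{
        nr &= 0x3fffffffUL;             /* analogous to nr &= __SYSCALL_MASK */
        if (nr < TABLE_SIZE) {
                nr = index_nospec(nr, TABLE_SIZE);
                return table[nr](arg);
        }
        return -1;                      /* analogous to returning -ENOSYS */
}

int main(void)
{
        printf("%ld\n", dispatch(2, 21));  /* calls h_dbl -> prints 42 */
        return 0;
}

The branchless clamp is the important part: the likely() bounds check alone does not constrain speculative execution, while ANDing with an all-zero mask for out-of-range values does.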