Diffstat (limited to 'arch/arm64/kernel')
-rw-r--r--  arch/arm64/kernel/Makefile            |  2
-rw-r--r--  arch/arm64/kernel/alternative.c       | 16
-rw-r--r--  arch/arm64/kernel/cpu_errata.c        | 24
-rw-r--r--  arch/arm64/kernel/cpufeature.c        |  4
-rw-r--r--  arch/arm64/kernel/debug-monitors.c    | 24
-rw-r--r--  arch/arm64/kernel/entry-common.c      |  2
-rw-r--r--  arch/arm64/kernel/entry.S             | 52
-rw-r--r--  arch/arm64/kernel/kgdb.c              |  2
-rw-r--r--  arch/arm64/kernel/perf_regs.c         | 25
-rw-r--r--  arch/arm64/kernel/probes/kprobes.c    | 12
-rw-r--r--  arch/arm64/kernel/ptrace.c            | 49
-rw-r--r--  arch/arm64/kernel/signal.c            | 11
-rw-r--r--  arch/arm64/kernel/signal32.c          | 27
-rw-r--r--  arch/arm64/kernel/syscall.c           | 21
-rw-r--r--  arch/arm64/kernel/vdso.c              | 61
-rw-r--r--  arch/arm64/kernel/vdso/Makefile       |  7
-rw-r--r--  arch/arm64/kernel/vdso/sigreturn.S    | 54
-rw-r--r--  arch/arm64/kernel/vdso32/Makefile     |  1
-rw-r--r--  arch/arm64/kernel/vdso32/sigreturn.S  | 58
-rw-r--r--  arch/arm64/kernel/vdso32/vdso.lds.S   | 12
-rw-r--r--  arch/arm64/kernel/vmlinux.lds.S       |  3
21 files changed, 225 insertions(+), 242 deletions(-)
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 151f28521f1e..a561cbb91d4d 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o
-ifneq ($(CONFIG_COMPAT_VDSO), y)
obj-$(CONFIG_COMPAT) += sigreturn32.o
-endif
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index d1757ef1b1e7..73039949b5ce 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -43,20 +43,8 @@ bool alternative_is_applied(u16 cpufeature)
*/
static bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
- unsigned long replptr;
-
- if (kernel_text_address(pc))
- return true;
-
- replptr = (unsigned long)ALT_REPL_PTR(alt);
- if (pc >= replptr && pc <= (replptr + alt->alt_len))
- return false;
-
- /*
- * Branching into *another* alternate sequence is doomed, and
- * we're not even trying to fix it up.
- */
- BUG();
+ unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
+ return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}
#define align_down(x, a) ((unsigned long)(x) & ~(((unsigned long)(a)) - 1))
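
The simplified helper above collapses the old three-way logic (kernel text check, in-sequence check, BUG()) into a single predicate: a branch needs retargeting unless it already lands inside this alternative's own replacement sequence. A minimal standalone sketch of that predicate, using hypothetical stand-ins for struct alt_instr and ALT_REPL_PTR():

    /* Sketch only: models the predicate with hypothetical types. */
    struct alt_sketch {
        unsigned long repl;      /* start of the replacement sequence */
        unsigned int  alt_len;   /* its length in bytes */
    };

    static int branch_requires_update(const struct alt_sketch *alt,
                                      unsigned long pc)
    {
        /* Targets inside [repl, repl + alt_len] are left alone;
         * anything else must be fixed up to keep pointing at the
         * original kernel text. */
        return !(pc >= alt->repl && pc <= alt->repl + alt->alt_len);
    }
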
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index ad06d6802d2e..79728bfb5351 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -460,6 +460,8 @@ static const struct midr_range arm64_ssb_cpus[] = {
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{},
};
@@ -470,12 +472,7 @@ static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
int scope)
{
- u32 midr = read_cpuid_id();
- /* Cortex-A76 r0p0 - r3p1 */
- struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
-
- WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
- return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+ return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif
@@ -726,6 +723,8 @@ static const struct midr_range erratum_1418040_list[] = {
MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
/* Neoverse-N1 r0p0 to r3p1 */
MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
+ /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
{},
};
#endif
@@ -770,11 +769,23 @@ static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1530923
/* Cortex A55 r0p0 to r2p0 */
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
{},
};
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+static const struct midr_range erratum_1463225[] = {
+ /* Cortex-A76 r0p0 - r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
+ MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
+ {},
+};
+#endif
+
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
{
@@ -914,6 +925,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.capability = ARM64_WORKAROUND_1463225,
.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
.matches = has_cortex_a76_erratum_1463225,
+ .midr_range_list = erratum_1463225,
},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
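
The MIDR_RANGE() entries above encode (variant, revision) windows read out of MIDR_EL1. The "Kryo4xx Gold (rcpe to rfpf)" annotation reads as variant 0xc/revision 0xe through variant 0xf/revision 0xf, which the comment maps onto Cortex-A76 r0p0 through r3p1. A hedged sketch of the containment test, using a simplified nibble packing rather than the kernel's MIDR_CPU_VAR_REV() shifts:

    /* Sketch: does a CPU's (variant, revision) fall inside
     * MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf)?
     * Simplified packing; the kernel packs variant at bit 20. */
    static int kryo4xx_gold_affected(unsigned int var, unsigned int rev)
    {
        unsigned int vr = (var << 4) | rev;   /* e.g. rcpe -> 0xce */
        return vr >= 0xce && vr <= 0xff;      /* rcpe .. rfpf */
    }
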
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 4ae41670c2e6..9fae0efc80c1 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1290,6 +1290,8 @@ static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
+ MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
{ /* sentinel */ }
};
char const *str = "kpti command line option";
@@ -1406,6 +1408,8 @@ static bool cpu_has_broken_dbm(void)
static const struct midr_range cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_1024718
MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 1, 0), // A55 r0p0 - r1p0
+ /* Kryo4xx Silver (rdpe => r1p0) */
+ MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
{},
};
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 5df49366e9ab..7310a4f7f993 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -141,17 +141,20 @@ postcore_initcall(debug_monitors_init);
/*
* Single step API and exception handling.
*/
-static void set_regs_spsr_ss(struct pt_regs *regs)
+static void set_user_regs_spsr_ss(struct user_pt_regs *regs)
{
regs->pstate |= DBG_SPSR_SS;
}
-NOKPROBE_SYMBOL(set_regs_spsr_ss);
+NOKPROBE_SYMBOL(set_user_regs_spsr_ss);
-static void clear_regs_spsr_ss(struct pt_regs *regs)
+static void clear_user_regs_spsr_ss(struct user_pt_regs *regs)
{
regs->pstate &= ~DBG_SPSR_SS;
}
-NOKPROBE_SYMBOL(clear_regs_spsr_ss);
+NOKPROBE_SYMBOL(clear_user_regs_spsr_ss);
+
+#define set_regs_spsr_ss(r) set_user_regs_spsr_ss(&(r)->user_regs)
+#define clear_regs_spsr_ss(r) clear_user_regs_spsr_ss(&(r)->user_regs)
static DEFINE_SPINLOCK(debug_hook_lock);
static LIST_HEAD(user_step_hook);
@@ -391,17 +394,26 @@ void user_rewind_single_step(struct task_struct *task)
* If single step is active for this thread, then set SPSR.SS
* to 1 to avoid returning to the active-pending state.
*/
- if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
set_regs_spsr_ss(task_pt_regs(task));
}
NOKPROBE_SYMBOL(user_rewind_single_step);
void user_fastforward_single_step(struct task_struct *task)
{
- if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
clear_regs_spsr_ss(task_pt_regs(task));
}
+void user_regs_reset_single_step(struct user_pt_regs *regs,
+ struct task_struct *task)
+{
+ if (test_tsk_thread_flag(task, TIF_SINGLESTEP))
+ set_user_regs_spsr_ss(regs);
+ else
+ clear_user_regs_spsr_ss(regs);
+}
+
/* Kernel API */
void kernel_enable_single_step(struct pt_regs *regs)
{
diff --git a/arch/arm64/kernel/entry-common.c b/arch/arm64/kernel/entry-common.c
index 3dbdf9752b11..d3be9dbf5490 100644
--- a/arch/arm64/kernel/entry-common.c
+++ b/arch/arm64/kernel/entry-common.c
@@ -57,7 +57,7 @@ static void notrace el1_dbg(struct pt_regs *regs, unsigned long esr)
/*
* The CPU masked interrupts, and we are leaving them masked during
* do_debug_exception(). Update PMR as if we had called
- * local_mask_daif().
+ * local_daif_mask().
*/
if (system_uses_irq_prio_masking())
gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5304d193c79d..35de8ba60e3d 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -126,8 +126,10 @@ alternative_else_nop_endif
add \dst, \dst, #(\sym - .entry.tramp.text)
.endm
- // This macro corrupts x0-x3. It is the caller's duty
- // to save/restore them if required.
+ /*
+ * This macro corrupts x0-x3. It is the caller's duty to save/restore
+ * them if required.
+ */
.macro apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb arm64_enable_wa2_handling
@@ -167,13 +169,28 @@ alternative_cb_end
stp x28, x29, [sp, #16 * 14]
.if \el == 0
+ .if \regsize == 32
+ /*
+ * If we're returning from a 32-bit task on a system affected by
+ * 1418040 then re-enable userspace access to the virtual counter.
+ */
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+ mrs x0, cntkctl_el1
+ orr x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
+ msr cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+ .endif
clear_gp_regs
mrs x21, sp_el0
ldr_this_cpu tsk, __entry_task, x20
msr sp_el0, tsk
- // Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
- // when scheduling.
+ /*
+ * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
+ * when scheduling.
+ */
ldr x19, [tsk, #TSK_TI_FLAGS]
disable_step_tsk x19, x20
@@ -320,6 +337,14 @@ alternative_else_nop_endif
tst x22, #PSR_MODE32_BIT // native task?
b.eq 3f
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if ARM64_WORKAROUND_1418040
+ mrs x0, cntkctl_el1
+ bic x0, x0, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
+ msr cntkctl_el1, x0
+alternative_else_nop_endif
+#endif
+
#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
@@ -331,21 +356,6 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
-#ifdef CONFIG_ARM64_ERRATUM_1418040
-alternative_if_not ARM64_WORKAROUND_1418040
- b 4f
-alternative_else_nop_endif
- /*
- * if (x22.mode32 == cntkctl_el1.el0vcten)
- * cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
- */
- mrs x1, cntkctl_el1
- eon x0, x1, x22, lsr #3
- tbz x0, #1, 4f
- eor x1, x1, #2 // ARCH_TIMER_USR_VCT_ACCESS_EN
- msr cntkctl_el1, x1
-4:
-#endif
scs_save tsk, x0
/* No kernel C function calls after this as user keys are set. */
@@ -377,11 +387,11 @@ alternative_else_nop_endif
.if \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
- bne 5f
+ bne 4f
msr far_el1, x30
tramp_alias x30, tramp_exit_native
br x30
-5:
+4:
tramp_alias x30, tramp_exit_compat
br x30
#endif
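
The reworked 1418040 handling replaces the old mode-comparing toggle on the return path with a symmetric pair: EL0 access to the virtual counter is switched off in kernel_exit when returning to a 32-bit task (so affected reads trap and can be handled by the kernel) and switched back on in kernel_entry when that task next takes an exception. Rendered as a hedged C sketch, with hypothetical accessors for CNTKCTL_EL1:

    #define ARCH_TIMER_USR_VCT_ACCESS_EN (1UL << 1)  /* CNTKCTL_EL1.EL0VCTEN */

    /* On exception entry from a 32-bit task: re-enable EL0 counter access.
     * (read/write_cntkctl_el1() are hypothetical accessors.) */
    void entry_from_compat(void)
    {
        write_cntkctl_el1(read_cntkctl_el1() | ARCH_TIMER_USR_VCT_ACCESS_EN);
    }

    /* On return to a 32-bit task: disable it so CNTVCT reads trap. */
    void exit_to_compat(void)
    {
        write_cntkctl_el1(read_cntkctl_el1() & ~ARCH_TIMER_USR_VCT_ACCESS_EN);
    }
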
diff --git a/arch/arm64/kernel/kgdb.c b/arch/arm64/kernel/kgdb.c
index 43119922341f..1a157ca33262 100644
--- a/arch/arm64/kernel/kgdb.c
+++ b/arch/arm64/kernel/kgdb.c
@@ -252,7 +252,7 @@ static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
if (!kgdb_single_step)
return DBG_HOOK_ERROR;
- kgdb_handle_exception(1, SIGTRAP, 0, regs);
+ kgdb_handle_exception(0, SIGTRAP, 0, regs);
return DBG_HOOK_HANDLED;
}
NOKPROBE_SYMBOL(kgdb_step_brk_fn);
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 0bbac612146e..666b225aeb3a 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -15,15 +15,34 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
return 0;
/*
- * Compat (i.e. 32 bit) mode:
- * - PC has been set in the pt_regs struct in kernel_entry,
- * - Handle SP and LR here.
+ * Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
+ * we're stuck with it for ABI compatibility reasons.
+ *
+ * A 32-bit consumer inspecting a 32-bit task will look at
+ * the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).
+ * These correspond directly to a prefix of the registers saved in our
+ * 'struct pt_regs', with the exception of the PC, so we copy that down
+ * (x15 corresponds to SP_hyp in the architecture).
+ *
+ * So far, so good.
+ *
+ * The oddity arises when a 64-bit consumer looks at a 32-bit task and
+ * asks for registers beyond PERF_REG_ARM_MAX. In this case, we return
+ * SP_usr, LR_usr and PC in the positions where the AArch64 SP, LR and
+ * PC registers would normally live. The initial idea was to allow a
+ * 64-bit unwinder to unwind a 32-bit task and, although it's not clear
+ * how well that works in practice, somebody might be relying on it.
+ *
+ * At the time we make a sample, we don't know whether the consumer is
+ * 32-bit or 64-bit, so we have to cater for both possibilities.
*/
if (compat_user_mode(regs)) {
if ((u32)idx == PERF_REG_ARM64_SP)
return regs->compat_sp;
if ((u32)idx == PERF_REG_ARM64_LR)
return regs->compat_lr;
+ if (idx == 15)
+ return regs->pc;
}
if ((u32)idx == PERF_REG_ARM64_SP)
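
Putting the comment above into code, the compat path of perf_reg_value() ends up handling three special indices; a condensed sketch (not the verbatim final function):

    /* Sketch of the compat handling in perf_reg_value() after this patch. */
    u64 compat_perf_reg_value(struct pt_regs *regs, int idx)
    {
        if (idx == 15)                      /* PC slot for 32-bit consumers */
            return regs->pc;
        if ((u32)idx == PERF_REG_ARM64_SP)  /* 64-bit consumer: SP_usr */
            return regs->compat_sp;
        if ((u32)idx == PERF_REG_ARM64_LR)  /* 64-bit consumer: LR_usr */
            return regs->compat_lr;
        return regs->regs[idx];             /* r0-r14 prefix as-is */
    }
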
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index d1c95dcf1d78..5290f17a4d80 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -120,15 +120,9 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
void *alloc_insn_page(void)
{
- void *page;
-
- page = vmalloc_exec(PAGE_SIZE);
- if (page) {
- set_memory_ro((unsigned long)page, 1);
- set_vm_flush_reset_perms(page);
- }
-
- return page;
+ return __vmalloc_node_range(PAGE_SIZE, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL_ROX, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE, __builtin_return_address(0));
}
/* arm kprobe: install breakpoint in text */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 68b7f34a08f5..1e02e98e68dd 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -1811,19 +1811,42 @@ static void tracehook_report_syscall(struct pt_regs *regs,
unsigned long saved_reg;
/*
- * A scratch register (ip(r12) on AArch32, x7 on AArch64) is
- * used to denote syscall entry/exit:
+ * We have some ABI weirdness here in the way that we handle syscall
+ * exit stops because we indicate whether or not the stop has been
+ * signalled from syscall entry or syscall exit by clobbering a general
+ * purpose register (ip/r12 for AArch32, x7 for AArch64) in the tracee
+ * and restoring its old value after the stop. This means that:
+ *
+ * - Any writes by the tracer to this register during the stop are
+ * ignored/discarded.
+ *
+ * - The actual value of the register is not available during the stop,
+ * so the tracer cannot save it and restore it later.
+ *
+ * - Syscall stops behave differently to seccomp and pseudo-step traps
+ * (the latter do not nobble any registers).
*/
regno = (is_compat_task() ? 12 : 7);
saved_reg = regs->regs[regno];
regs->regs[regno] = dir;
- if (dir == PTRACE_SYSCALL_EXIT)
+ if (dir == PTRACE_SYSCALL_ENTER) {
+ if (tracehook_report_syscall_entry(regs))
+ forget_syscall(regs);
+ regs->regs[regno] = saved_reg;
+ } else if (!test_thread_flag(TIF_SINGLESTEP)) {
tracehook_report_syscall_exit(regs, 0);
- else if (tracehook_report_syscall_entry(regs))
- forget_syscall(regs);
+ regs->regs[regno] = saved_reg;
+ } else {
+ regs->regs[regno] = saved_reg;
- regs->regs[regno] = saved_reg;
+ /*
+ * Signal a pseudo-step exception since we are stepping but
+ * tracer modifications to the registers may have rewound the
+ * state machine.
+ */
+ tracehook_report_syscall_exit(regs, 1);
+ }
}
int syscall_trace_enter(struct pt_regs *regs)
@@ -1833,12 +1856,12 @@ int syscall_trace_enter(struct pt_regs *regs)
if (flags & (_TIF_SYSCALL_EMU | _TIF_SYSCALL_TRACE)) {
tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
if (!in_syscall(regs) || (flags & _TIF_SYSCALL_EMU))
- return -1;
+ return NO_SYSCALL;
}
/* Do the secure computing after ptrace; failures should be fast. */
if (secure_computing() == -1)
- return -1;
+ return NO_SYSCALL;
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
trace_sys_enter(regs, regs->syscallno);
@@ -1851,12 +1874,14 @@ int syscall_trace_enter(struct pt_regs *regs)
void syscall_trace_exit(struct pt_regs *regs)
{
+ unsigned long flags = READ_ONCE(current_thread_info()->flags);
+
audit_syscall_exit(regs);
- if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ if (flags & _TIF_SYSCALL_TRACEPOINT)
trace_sys_exit(regs, regs_return_value(regs));
- if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (flags & (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP))
tracehook_report_syscall(regs, PTRACE_SYSCALL_EXIT);
rseq_syscall(regs);
@@ -1934,8 +1959,8 @@ static int valid_native_regs(struct user_pt_regs *regs)
*/
int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task)
{
- if (!test_tsk_thread_flag(task, TIF_SINGLESTEP))
- regs->pstate &= ~DBG_SPSR_SS;
+ /* https://lore.kernel.org/lkml/20191118131525.GA4180@willie-the-truck */
+ user_regs_reset_single_step(regs, task);
if (is_compat_thread(task_thread_info(task)))
return valid_compat_regs(regs);
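
From the tracer's side, the register-clobbering ABI described in the comment looks like this; a userspace sketch (error handling elided) that reads x7 at a ptrace syscall stop, where 0 indicates entry and 1 indicates exit:

    #include <elf.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <sys/user.h>

    static void show_syscall_stop(pid_t pid)
    {
        struct user_regs_struct regs;
        struct iovec iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

        ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
        /* x7 is clobbered by the kernel for the duration of the stop;
         * writing it back here would be discarded, per the comment above. */
        printf("x7 = %llu (%s stop)\n",
               (unsigned long long)regs.regs[7],
               regs.regs[7] ? "syscall-exit" : "syscall-entry");
    }
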
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 801d56cdf701..3b4f31f35e45 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -800,7 +800,6 @@ static void setup_restart_syscall(struct pt_regs *regs)
*/
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
- struct task_struct *tsk = current;
sigset_t *oldset = sigmask_to_save();
int usig = ksig->sig;
int ret;
@@ -824,14 +823,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
*/
ret |= !valid_user_regs(&regs->user_regs, current);
- /*
- * Fast forward the stepping logic so we step into the signal
- * handler.
- */
- if (!ret)
- user_fastforward_single_step(tsk);
-
- signal_setup_done(ret, ksig, 0);
+ /* Step into the signal handler if we are stepping */
+ signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
}
/*
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 82feca6f7052..2f507f565c48 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -342,38 +342,13 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
retcode = ptr_to_compat(ka->sa.sa_restorer);
} else {
/* Set up sigreturn pointer */
-#ifdef CONFIG_COMPAT_VDSO
- void *vdso_base = current->mm->context.vdso;
- void *vdso_trampoline;
-
- if (ka->sa.sa_flags & SA_SIGINFO) {
- if (thumb) {
- vdso_trampoline = VDSO_SYMBOL(vdso_base,
- compat_rt_sigreturn_thumb);
- } else {
- vdso_trampoline = VDSO_SYMBOL(vdso_base,
- compat_rt_sigreturn_arm);
- }
- } else {
- if (thumb) {
- vdso_trampoline = VDSO_SYMBOL(vdso_base,
- compat_sigreturn_thumb);
- } else {
- vdso_trampoline = VDSO_SYMBOL(vdso_base,
- compat_sigreturn_arm);
- }
- }
-
- retcode = ptr_to_compat(vdso_trampoline) + thumb;
-#else
unsigned int idx = thumb << 1;
if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3;
- retcode = (unsigned long)current->mm->context.vdso +
+ retcode = (unsigned long)current->mm->context.sigpage +
(idx << 2) + thumb;
-#endif
}
regs->regs[0] = usig;
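
The else-branch above picks one of four trampolines laid out in the signal page: idx selects a word offset and the low bit of the address selects Thumb state. A small sketch of the arithmetic, lifted directly from the code above:

    /* Sketch: retcode selection for the compat sigpage.
     *   idx = thumb << 1, plus 3 when SA_SIGINFO is set, giving:
     *   ARM sigreturn -> 0, Thumb sigreturn -> 8 (|1),
     *   ARM rt_sigreturn -> 12, Thumb rt_sigreturn -> 20 (|1). */
    static unsigned long compat_retcode(unsigned long sigpage,
                                        int thumb, int siginfo)
    {
        unsigned int idx = thumb << 1;

        if (siginfo)
            idx += 3;
        return sigpage + (idx << 2) + thumb;  /* +thumb sets the Thumb bit */
    }
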
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5f5b868292f5..5f0c04863d2c 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -50,6 +50,9 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
ret = do_ni_syscall(regs, scno);
}
+ if (is_compat_task())
+ ret = lower_32_bits(ret);
+
regs->regs[0] = ret;
}
@@ -121,7 +124,21 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
user_exit();
if (has_syscall_work(flags)) {
- /* set default errno for user-issued syscall(-1) */
+ /*
+ * The de-facto standard way to skip a system call using ptrace
+ * is to set the system call to -1 (NO_SYSCALL) and set x0 to a
+ * suitable error code for consumption by userspace. However,
+ * this cannot be distinguished from a user-issued syscall(-1)
+ * and so we must set x0 to -ENOSYS here in case the tracer doesn't
+ * issue the skip and we fall into trace_exit with x0 preserved.
+ *
+ * This is slightly odd because it also means that if a tracer
+ * sets the system call number to -1 but does not initialise x0,
+ * then x0 will be preserved for all system calls apart from a
+ * user-issued syscall(-1). However, requesting a skip and not
+ * setting the return value is unlikely to do anything sensible
+ * anyway.
+ */
if (scno == NO_SYSCALL)
regs->regs[0] = -ENOSYS;
scno = syscall_trace_enter(regs);
@@ -139,7 +156,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
local_daif_mask();
flags = current_thread_info()->flags;
- if (!has_syscall_work(flags)) {
+ if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
/*
* We're off to userspace, where interrupts are
* always enabled after we restore the flags from
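
The skip convention documented above can be driven from userspace with the NT_ARM_SYSTEM_CALL regset; a hedged tracer sketch (error handling elided) that, at a syscall-entry stop, rewrites the syscall number to -1 and plants an error code in x0:

    #include <elf.h>
    #include <errno.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <sys/user.h>

    static void skip_syscall(pid_t pid)
    {
        int nr = -1;                        /* NO_SYSCALL */
        struct iovec nr_iov = { .iov_base = &nr, .iov_len = sizeof(nr) };
        struct user_regs_struct regs;
        struct iovec rs_iov = { .iov_base = &regs, .iov_len = sizeof(regs) };

        ptrace(PTRACE_SETREGSET, pid, NT_ARM_SYSTEM_CALL, &nr_iov);
        ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &rs_iov);
        regs.regs[0] = -EPERM;              /* error seen by the tracee */
        ptrace(PTRACE_SETREGSET, pid, NT_PRSTATUS, &rs_iov);
    }
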
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 4e016574bd91..e546df0efefb 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -191,15 +191,12 @@ enum aarch32_map {
#ifdef CONFIG_COMPAT_VDSO
AA32_MAP_VVAR,
AA32_MAP_VDSO,
-#else
- AA32_MAP_SIGPAGE
#endif
+ AA32_MAP_SIGPAGE
};
static struct page *aarch32_vectors_page __ro_after_init;
-#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
-#endif
static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VECTORS] = {
@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
.name = "[vdso]",
.mremap = aarch32_vdso_mremap,
},
-#else
+#endif /* CONFIG_COMPAT_VDSO */
[AA32_MAP_SIGPAGE] = {
.name = "[sigpage]", /* ABI */
.pages = &aarch32_sig_page,
},
-#endif /* CONFIG_COMPAT_VDSO */
};
static int aarch32_alloc_kuser_vdso_page(void)
@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
return 0;
}
-#ifdef CONFIG_COMPAT_VDSO
-static int __aarch32_alloc_vdso_pages(void)
-{
- int ret;
-
- vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
- vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
-
- ret = __vdso_init(VDSO_ABI_AA32);
- if (ret)
- return ret;
-
- return aarch32_alloc_kuser_vdso_page();
-}
-#else
-static int __aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_sigpage(void)
{
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long sigpage;
- int ret;
sigpage = get_zeroed_page(GFP_ATOMIC);
if (!sigpage)
@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void)
memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
aarch32_sig_page = virt_to_page(sigpage);
flush_dcache_page(aarch32_sig_page);
+ return 0;
+}
- ret = aarch32_alloc_kuser_vdso_page();
- if (ret)
- free_page(sigpage);
+#ifdef CONFIG_COMPAT_VDSO
+static int __aarch32_alloc_vdso_pages(void)
+{
+ vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
+ vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
- return ret;
+ return __vdso_init(VDSO_ABI_AA32);
}
#endif /* CONFIG_COMPAT_VDSO */
static int __init aarch32_alloc_vdso_pages(void)
{
- return __aarch32_alloc_vdso_pages();
+ int ret;
+
+#ifdef CONFIG_COMPAT_VDSO
+ ret = __aarch32_alloc_vdso_pages();
+ if (ret)
+ return ret;
+#endif
+
+ ret = aarch32_alloc_sigpage();
+ if (ret)
+ return ret;
+
+ return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);
@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
return PTR_ERR_OR_ZERO(ret);
}
-#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
unsigned long addr;
@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
if (IS_ERR(ret))
goto out;
- mm->context.vdso = (void *)addr;
+ mm->context.sigpage = (void *)addr;
out:
return PTR_ERR_OR_ZERO(ret);
}
-#endif /* !CONFIG_COMPAT_VDSO */
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm,
bprm,
uses_interp);
-#else
- ret = aarch32_sigreturn_setup(mm);
+ if (ret)
+ goto out;
#endif /* CONFIG_COMPAT_VDSO */
+ ret = aarch32_sigreturn_setup(mm);
out:
mmap_write_unlock(mm);
return ret;
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 556d424c6f52..45d5cfe46429 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -23,13 +23,14 @@ btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
# potential future proofing if we end up with internal calls to the exported
# routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
# preparation in build-time C")).
-ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
- -Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
+ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
+ -Bsymbolic $(call ld-option, --no-eh-frame-hdr) --build-id -n \
+ $(btildflags-y) -T
ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
ccflags-y += -DDISABLE_BRANCH_PROFILING
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS)
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) -Os $(CC_FLAGS_SCS) $(GCC_PLUGINS_CFLAGS)
KBUILD_CFLAGS += $(DISABLE_LTO)
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
index 620a3ef837b7..0e18729abc3b 100644
--- a/arch/arm64/kernel/vdso/sigreturn.S
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -18,29 +18,40 @@
.text
+/*
+ * NOTE!!! You may notice that all of the .cfi directives in this file have
+ * been commented out. This is because they have been shown to trigger segfaults
+ * in libgcc when unwinding out of a SIGCANCEL handler to invoke pthread
+ * cleanup handlers during the thread cancellation dance. By omitting the
+ * directives, we trigger an arm64-specific fallback path in the unwinder which
+ * recognises the signal frame and restores many of the registers directly from
+ * the sigcontext. Re-enabling the cfi directives here therefore needs to be
+ * much more comprehensive to reduce the risk of further regressions.
+ */
+
/* Ensure that the mysterious NOP can be associated with a function. */
- .cfi_startproc
+// .cfi_startproc
/*
- * .cfi_signal_frame causes the corresponding Frame Description Entry in the
- * .eh_frame section to be annotated as a signal frame. This allows DWARF
- * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo(), which permits
- * unwinding out of the signal trampoline without the need for the mysterious
- * NOP.
+ * .cfi_signal_frame causes the corresponding Frame Description Entry (FDE) in
+ * the .eh_frame section to be annotated as a signal frame. This allows DWARF
+ * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo() and identify
+ * the next frame using the unmodified return address instead of subtracting 1,
+ * which may yield the wrong FDE.
*/
- .cfi_signal_frame
+// .cfi_signal_frame
/*
* Tell the unwinder where to locate the frame record linking back to the
- * interrupted context. We don't provide unwind info for registers other
- * than the frame pointer and the link register here; in practice, this
- * is sufficient for unwinding in C/C++ based runtimes and the values in
- * the sigcontext may have been modified by this point anyway. Debuggers
+ * interrupted context. We don't provide unwind info for registers other than
+ * the frame pointer and the link register here; in practice, this is likely to
+ * be insufficient for unwinding in C/C++ based runtimes, especially without a
+ * means to restore the stack pointer. Thankfully, unwinders and debuggers
* already have baked-in strategies for attempting to unwind out of signals.
*/
- .cfi_def_cfa x29, 0
- .cfi_offset x29, 0 * 8
- .cfi_offset x30, 1 * 8
+// .cfi_def_cfa x29, 0
+// .cfi_offset x29, 0 * 8
+// .cfi_offset x30, 1 * 8
/*
* This mysterious NOP is required for some unwinders (e.g. libc++) that
@@ -51,16 +62,19 @@
nop // Mysterious NOP
/*
- * GDB relies on being able to identify the sigreturn instruction sequence to
- * unwind from signal handlers. We cannot, therefore, use SYM_FUNC_START()
- * here, as it will emit a BTI C instruction and break the unwinder. Thankfully,
- * this function is only ever called from a RET and so omitting the landing pad
- * is perfectly fine.
+ * GDB, libgcc and libunwind rely on being able to identify the sigreturn
+ * instruction sequence to unwind from signal handlers. We cannot, therefore,
+ * use SYM_FUNC_START() here, as it will emit a BTI C instruction and break the
+ * unwinder. Thankfully, this function is only ever called from a RET and so
+ * omitting the landing pad is perfectly fine.
*/
SYM_CODE_START(__kernel_rt_sigreturn)
+// PLEASE DO NOT MODIFY
mov x8, #__NR_rt_sigreturn
+// PLEASE DO NOT MODIFY
svc #0
- .cfi_endproc
+// PLEASE DO NOT MODIFY
+// .cfi_endproc
SYM_CODE_END(__kernel_rt_sigreturn)
emit_aarch64_feature_1_and
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 7ea1e827e505..d88148bef6b0 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -140,7 +140,6 @@ hostprogs := $(munge)
c-obj-vdso := note.o
c-obj-vdso-gettimeofday := vgettimeofday.o
-asm-obj-vdso := sigreturn.o
ifneq ($(c-gettimeofday-y),)
VDSO_CFLAGS_gettimeofday_o += -include $(c-gettimeofday-y)
diff --git a/arch/arm64/kernel/vdso32/sigreturn.S b/arch/arm64/kernel/vdso32/sigreturn.S
deleted file mode 100644
index b0091064c3d6..000000000000
--- a/arch/arm64/kernel/vdso32/sigreturn.S
+++ /dev/null
@@ -1,58 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * This file provides both A32 and T32 versions, in accordance with the
- * arm sigreturn code.
- *
- * Please read the comments in arch/arm64/kernel/vdso/sigreturn.S to
- * understand some of the craziness in here.
- *
- * Copyright (C) 2018 ARM Limited
- */
-
-#include <linux/linkage.h>
-#include <asm/asm-offsets.h>
-#include <asm/unistd.h>
-
- .text
-
- .arm
- .fnstart
- .save {r0-r15}
- .pad #COMPAT_SIGFRAME_REGS_OFFSET
- nop
-SYM_CODE_START(__kernel_sigreturn_arm)
- mov r7, #__NR_compat_sigreturn
- svc #0
- .fnend
-SYM_CODE_END(__kernel_sigreturn_arm)
-
- .fnstart
- .save {r0-r15}
- .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
- nop
-SYM_CODE_START(__kernel_rt_sigreturn_arm)
- mov r7, #__NR_compat_rt_sigreturn
- svc #0
- .fnend
-SYM_CODE_END(__kernel_rt_sigreturn_arm)
-
- .thumb
- .fnstart
- .save {r0-r15}
- .pad #COMPAT_SIGFRAME_REGS_OFFSET
- nop
-SYM_CODE_START(__kernel_sigreturn_thumb)
- mov r7, #__NR_compat_sigreturn
- svc #0
- .fnend
-SYM_CODE_END(__kernel_sigreturn_thumb)
-
- .fnstart
- .save {r0-r15}
- .pad #COMPAT_RT_SIGFRAME_REGS_OFFSET
- nop
-SYM_CODE_START(__kernel_rt_sigreturn_thumb)
- mov r7, #__NR_compat_rt_sigreturn
- svc #0
- .fnend
-SYM_CODE_END(__kernel_rt_sigreturn_thumb)
diff --git a/arch/arm64/kernel/vdso32/vdso.lds.S b/arch/arm64/kernel/vdso32/vdso.lds.S
index a3944927eaeb..337d03522048 100644
--- a/arch/arm64/kernel/vdso32/vdso.lds.S
+++ b/arch/arm64/kernel/vdso32/vdso.lds.S
@@ -64,19 +64,7 @@ VERSION
__vdso_clock_gettime;
__vdso_gettimeofday;
__vdso_clock_getres;
- __kernel_sigreturn_arm;
- __kernel_sigreturn_thumb;
- __kernel_rt_sigreturn_arm;
- __kernel_rt_sigreturn_thumb;
__vdso_clock_gettime64;
local: *;
};
}
-
-/*
- * Make the sigreturn code visible to the kernel.
- */
-VDSO_compat_sigreturn_arm = __kernel_sigreturn_arm;
-VDSO_compat_sigreturn_thumb = __kernel_sigreturn_thumb;
-VDSO_compat_rt_sigreturn_arm = __kernel_rt_sigreturn_arm;
-VDSO_compat_rt_sigreturn_thumb = __kernel_rt_sigreturn_thumb;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 6827da7f3aa5..5423ffe0a987 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -165,9 +165,6 @@ SECTIONS
*(.altinstructions)
__alt_instructions_end = .;
}
- .altinstr_replacement : {
- *(.altinstr_replacement)
- }
. = ALIGN(SEGMENT_ALIGN);
__inittext_end = .;