author | Will Deacon <will@kernel.org> | 2020-05-28 19:00:51 +0200
---|---|---
committer | Will Deacon <will@kernel.org> | 2020-05-28 19:00:51 +0200
commit | d27865279f12035c730818aa1a0280fada866a37 (patch) |
tree | 5c4462885c2a6d7453b34ceb51d39b390030a2a6 /arch/arm64/kernel/vdso |
parent | Merge branches 'for-next/acpi', 'for-next/bpf', 'for-next/cpufeature', 'for-n... (diff) |
parent | arm64: vdso: Fix CFI directives in sigreturn trampoline (diff) |
download | linux-d27865279f12035c730818aa1a0280fada866a37.tar.xz linux-d27865279f12035c730818aa1a0280fada866a37.zip |
Merge branch 'for-next/bti' into for-next/core
Support for Branch Target Identification (BTI) in user and kernel
(Mark Brown and others)
* for-next/bti: (39 commits)
arm64: vdso: Fix CFI directives in sigreturn trampoline
arm64: vdso: Don't prefix sigreturn trampoline with a BTI C instruction
arm64: bti: Fix support for userspace only BTI
arm64: kconfig: Update and comment GCC version check for kernel BTI
arm64: vdso: Map the vDSO text with guarded pages when built for BTI
arm64: vdso: Force the vDSO to be linked as BTI when built for BTI
arm64: vdso: Annotate for BTI
arm64: asm: Provide a mechanism for generating ELF note for BTI
arm64: bti: Provide Kconfig for kernel mode BTI
arm64: mm: Mark executable text as guarded pages
arm64: bpf: Annotate JITed code for BTI
arm64: Set GP bit in kernel page tables to enable BTI for the kernel
arm64: asm: Override SYM_FUNC_START when building the kernel with BTI
arm64: bti: Support building kernel C code using BTI
arm64: Document why we enable PAC support for leaf functions
arm64: insn: Report PAC and BTI instructions as skippable
arm64: insn: Don't assume unrecognized HINTs are skippable
arm64: insn: Provide a better name for aarch64_insn_is_nop()
arm64: insn: Add constants for new HINT instruction decode
arm64: Disable old style assembly annotations
...
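For context on what this series enforces: when a page is mapped with the Guarded Page attribute, BTI requires every target of an indirect branch (BR/BLR) to start with a landing-pad instruction such as BTI C, otherwise the CPU raises a Branch Target exception. The sketch below is not taken from this series; `my_callback` is a hypothetical symbol, and the snippet only illustrates the kind of landing pad that SYM_FUNC_START() is arranged to emit for kernel assembly functions when built with kernel-mode BTI.

```
	.text
	.globl	my_callback			// hypothetical example symbol
	.type	my_callback, %function
my_callback:
	bti	c				// landing pad: valid target for BLR / BR x16/x17
	mov	w0, #1				// trivial body
	ret
	.size	my_callback, . - my_callback
```

Without the `bti c`, an indirect call into `my_callback` would fault if it lives in a guarded page; direct BL calls are unaffected by the check.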
Diffstat (limited to 'arch/arm64/kernel/vdso')
-rw-r--r-- | arch/arm64/kernel/vdso/Makefile | 4
-rw-r--r-- | arch/arm64/kernel/vdso/note.S | 3
-rw-r--r-- | arch/arm64/kernel/vdso/sigreturn.S | 54
-rw-r--r-- | arch/arm64/kernel/vdso/vdso.S | 3
4 files changed, 56 insertions, 8 deletions
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index 95e9e444ca93..fccd67d07c8c 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -17,12 +17,14 @@ obj-vdso := vgettimeofday.o note.o sigreturn.o
 targets := $(obj-vdso) vdso.so vdso.so.dbg
 obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
 
+btildflags-$(CONFIG_ARM64_BTI_KERNEL) += -z force-bti
+
 # -Bsymbolic has been added for consistency with arm, the compat vDSO and
 # potential future proofing if we end up with internal calls to the exported
 # routines, as x86 does (see 6f121e548f83 ("x86, vdso: Reimplement vdso.so
 # preparation in build-time C")).
 ldflags-y := -shared -nostdlib -soname=linux-vdso.so.1 --hash-style=sysv \
-             -Bsymbolic --eh-frame-hdr --build-id -n -T
+             -Bsymbolic --eh-frame-hdr --build-id -n $(btildflags-y) -T
 
 ccflags-y := -fno-common -fno-builtin -fno-stack-protector -ffixed-x18
 ccflags-y += -DDISABLE_BRANCH_PROFILING
diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S
index 0ce6ec75a525..3d4e82290c80 100644
--- a/arch/arm64/kernel/vdso/note.S
+++ b/arch/arm64/kernel/vdso/note.S
@@ -12,9 +12,12 @@
 #include <linux/version.h>
 #include <linux/elfnote.h>
 #include <linux/build-salt.h>
+#include <asm/assembler.h>
 
 ELFNOTE_START(Linux, 0, "a")
   .long LINUX_VERSION_CODE
 ELFNOTE_END
 
 BUILD_SALT
+
+emit_aarch64_feature_1_and
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
index 12324863d5c2..620a3ef837b7 100644
--- a/arch/arm64/kernel/vdso/sigreturn.S
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -1,7 +1,11 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
  * Sigreturn trampoline for returning from a signal when the SA_RESTORER
- * flag is not set.
+ * flag is not set. It serves primarily as a hall of shame for crappy
+ * unwinders and features an exciting but mysterious NOP instruction.
+ *
+ * It's also fragile as hell, so please think twice before changing anything
+ * in here.
  *
  * Copyright (C) 2012 ARM Limited
  *
@@ -9,18 +13,54 @@
  */
 
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/unistd.h>
 
 	.text
 
-	nop
-SYM_FUNC_START(__kernel_rt_sigreturn)
+/* Ensure that the mysterious NOP can be associated with a function. */
 	.cfi_startproc
+
+/*
+ * .cfi_signal_frame causes the corresponding Frame Description Entry in the
+ * .eh_frame section to be annotated as a signal frame. This allows DWARF
+ * unwinders (e.g. libstdc++) to implement _Unwind_GetIPInfo(), which permits
+ * unwinding out of the signal trampoline without the need for the mysterious
+ * NOP.
+ */
 	.cfi_signal_frame
-	.cfi_def_cfa	x29, 0
-	.cfi_offset	x29, 0 * 8
-	.cfi_offset	x30, 1 * 8
+
+/*
+ * Tell the unwinder where to locate the frame record linking back to the
+ * interrupted context. We don't provide unwind info for registers other
+ * than the frame pointer and the link register here; in practice, this
+ * is sufficient for unwinding in C/C++ based runtimes and the values in
+ * the sigcontext may have been modified by this point anyway. Debuggers
+ * already have baked-in strategies for attempting to unwind out of signals.
+ */
+	.cfi_def_cfa	x29, 0
+	.cfi_offset	x29, 0 * 8
+	.cfi_offset	x30, 1 * 8
+
+/*
+ * This mysterious NOP is required for some unwinders (e.g. libc++) that
+ * unconditionally subtract one from the result of _Unwind_GetIP() in order to
+ * identify the calling function.
+ * Hack borrowed from arch/powerpc/kernel/vdso64/sigtramp.S.
+ */
+	nop	// Mysterious NOP
+
+/*
+ * GDB relies on being able to identify the sigreturn instruction sequence to
+ * unwind from signal handlers. We cannot, therefore, use SYM_FUNC_START()
+ * here, as it will emit a BTI C instruction and break the unwinder. Thankfully,
+ * this function is only ever called from a RET and so omitting the landing pad
+ * is perfectly fine.
+ */
+SYM_CODE_START(__kernel_rt_sigreturn)
 	mov	x8, #__NR_rt_sigreturn
 	svc	#0
 	.cfi_endproc
-SYM_FUNC_END(__kernel_rt_sigreturn)
+SYM_CODE_END(__kernel_rt_sigreturn)
+
+emit_aarch64_feature_1_and
diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso/vdso.S
index d1414fee5274..c4b1990bf2be 100644
--- a/arch/arm64/kernel/vdso/vdso.S
+++ b/arch/arm64/kernel/vdso/vdso.S
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <linux/const.h>
+#include <asm/assembler.h>
 #include <asm/page.h>
 
 	.globl vdso_start, vdso_end
@@ -19,3 +20,5 @@ vdso_start:
 vdso_end:
 
 	.previous
+
+emit_aarch64_feature_1_and
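The emit_aarch64_feature_1_and lines added above invoke a macro from asm/assembler.h (introduced by "arm64: asm: Provide a mechanism for generating ELF note for BTI" in this series) that emits a .note.gnu.property entry; the linker uses that note, together with the new -z force-bti flag in the vDSO Makefile, to mark the output as BTI-protected and warn about any input object that lacks the annotation. As a hand-written sketch of roughly what such a GNU property note looks like at the assembler level — the field layout follows the generic ELF GNU-property convention and is not copied from the kernel macro:

```
	.pushsection .note.gnu.property, "a"
	.p2align 3
	.long	2f - 1f			// n_namesz
	.long	4f - 3f			// n_descsz
	.long	5			// n_type: NT_GNU_PROPERTY_TYPE_0
1:	.string	"GNU"
2:	.p2align 3
3:	.long	0xc0000000		// pr_type: GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long	4			// pr_datasz
	.long	0x3			// feature bits: BTI (bit 0) | PAC (bit 1)
	.p2align 3			// pad the property to 8 bytes
4:
	.popsection
```

readelf --notes on an object carrying such a note reports the AArch64 BTI/PAC features, which is what lets the kernel map the vDSO text with guarded pages only when every constituent object was built for BTI.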