author      Linus Torvalds <torvalds@linux-foundation.org>  2020-06-02 00:18:27 +0200
committer   Linus Torvalds <torvalds@linux-foundation.org>  2020-06-02 00:18:27 +0200
commit      533b220f7be4e461a5222a223d169b42856741ef (patch)
tree        3684fae5a676b31a4a75e275a0ee5519f0d3c6d9 /fs/binfmt_elf.c
parent      Merge tag 'm68k-for-v5.8-tag1' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff)
parent      Merge branch 'for-next/scs' into for-next/core (diff)
Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:
 "A sizeable pile of arm64 updates for 5.8.

  Summary below, but the big two features are support for Branch Target
  Identification and Clang's Shadow Call stack. The latter is currently
  arm64-only, but the high-level parts are all in core code so it could
  easily be adopted by other architectures pending toolchain support.

  Branch Target Identification (BTI):

   - Support for ARMv8.5-BTI in both user- and kernel-space. This allows
     branch targets to limit the types of branch from which they can be
     called and additionally prevents branching to arbitrary code,
     although kernel support requires a very recent toolchain.

   - Function annotation via SYM_FUNC_START() so that assembly functions
     are wrapped with the relevant "landing pad" instructions.

   - BPF and vDSO updates to use the new instructions.

   - Addition of a new HWCAP and exposure of BTI capability to userspace
     via ID register emulation, along with ELF loader support for the
     BTI feature in .note.gnu.property.

   - Non-critical fixes to CFI unwind annotations in the sigreturn
     trampoline.

  Shadow Call Stack (SCS):

   - Support for Clang's Shadow Call Stack feature, which reserves
     platform register x18 to point at a separate stack for each task
     that holds only return addresses. This protects function return
     control flow from buffer overruns on the main stack.

   - Save/restore of x18 across problematic boundaries (user-mode,
     hypervisor, EFI, suspend, etc).

   - Core support for SCS, should other architectures want to use it
     too.

   - SCS overflow checking on context-switch as part of the existing
     stack limit check if CONFIG_SCHED_STACK_END_CHECK=y.

  CPU feature detection:

   - Removed numerous "SANITY CHECK" errors when running on a system
     with mismatched AArch32 support at EL1. This is primarily a concern
     for KVM, which disabled support for 32-bit guests on such a system.

   - Addition of new ID registers and fields as the architecture has
     been extended.

  Perf and PMU drivers:

   - Minor fixes and cleanups to system PMU drivers.

  Hardware errata:

   - Unify KVM workarounds for VHE and nVHE configurations.

   - Sort vendor errata entries in Kconfig.

  Secure Monitor Call Calling Convention (SMCCC):

   - Update to the latest specification from Arm (v1.2).

   - Allow PSCI code to query the SMCCC version.

  Software Delegated Exception Interface (SDEI):

   - Unexport a bunch of unused symbols.

   - Minor fixes to handling of firmware data.

  Pointer authentication:

   - Add support for dumping the kernel PAC mask in vmcoreinfo so that
     the stack can be unwound by tools such as kdump.

   - Simplification of key initialisation during CPU bringup.

  BPF backend:

   - Improve immediate generation for logical and add/sub instructions.

  vDSO:

   - Minor fixes to the linker flags for consistency with other
     architectures and support for LLVM's unwinder.

   - Clean up logic to initialise and map the vDSO into userspace.

  ACPI:

   - Workaround for an ambiguity in the IORT specification relating to
     the "num_ids" field.

   - Support _DMA method for all named components rather than only PCIe
     root complexes.

   - Minor other IORT-related fixes.

  Miscellaneous:

   - Initialise debug traps early for KGDB and fix KDB cacheflushing
     deadlock.

   - Minor tweaks to early boot state (documentation update, set
     TEXT_OFFSET to 0x0, increase alignment of PE/COFF sections).

   - Refactoring and cleanup"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (148 commits)
  KVM: arm64: Move __load_guest_stage2 to kvm_mmu.h
  KVM: arm64: Check advertised Stage-2 page size capability
  arm64/cpufeature: Add get_arm64_ftr_reg_nowarn()
  ACPI/IORT: Remove the unused __get_pci_rid()
  arm64/cpuinfo: Add ID_MMFR4_EL1 into the cpuinfo_arm64 context
  arm64/cpufeature: Add remaining feature bits in ID_AA64PFR1 register
  arm64/cpufeature: Add remaining feature bits in ID_AA64PFR0 register
  arm64/cpufeature: Add remaining feature bits in ID_AA64ISAR0 register
  arm64/cpufeature: Add remaining feature bits in ID_MMFR4 register
  arm64/cpufeature: Add remaining feature bits in ID_PFR0 register
  arm64/cpufeature: Introduce ID_MMFR5 CPU register
  arm64/cpufeature: Introduce ID_DFR1 CPU register
  arm64/cpufeature: Introduce ID_PFR2 CPU register
  arm64/cpufeature: Make doublelock a signed feature in ID_AA64DFR0
  arm64/cpufeature: Drop TraceFilt feature exposure from ID_DFR0 register
  arm64/cpufeature: Add explicit ftr_id_isar0[] for ID_ISAR0 register
  arm64: mm: Add asid_gen_match() helper
  firmware: smccc: Fix missing prototype warning for arm_smccc_version_init
  arm64: vdso: Fix CFI directives in sigreturn trampoline
  arm64: vdso: Don't prefix sigreturn trampoline with a BTI C instruction
  ...
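The ELF loader part of this pull (the fs/binfmt_elf.c diff below) teaches load_elf_binary() to find the PT_GNU_PROPERTY program header and walk the NT_GNU_PROPERTY_TYPE_0 note it points at. As background (this layout comes from the GNU ELF extensions, not from the patch itself): the note is a standard note header, the name "GNU", then a descriptor made of (pr_type, pr_datasz, payload) records; the new parse_elf_properties()/parse_elf_property() code reads the whole segment into a 1 KiB buffer and steps through those records. A standalone layout sketch, using local struct names so it compiles outside the kernel:

```c
/*
 * Illustrative layout sketch of a PT_GNU_PROPERTY segment, i.e. what
 * parse_elf_properties() below reads with kernel_read() and then walks.
 * Struct and field names are local to this sketch; the kernel uses
 * struct elf_note and struct gnu_property for the same shapes.
 */
#include <stdint.h>

struct note_header {		/* generic ELF note header */
	uint32_t n_namesz;	/* 4: "GNU" plus its NUL terminator */
	uint32_t n_descsz;	/* total size of the property records */
	uint32_t n_type;	/* NT_GNU_PROPERTY_TYPE_0 */
};
/*
 * The name "GNU\0" follows, padded so that the first record starts on an
 * ELF_GNU_PROPERTY_ALIGN boundary (the diff computes this with round_up()).
 */

struct property_record {	/* one entry in the descriptor */
	uint32_t pr_type;	/* records must be unique and sorted by type */
	uint32_t pr_datasz;	/* payload size that follows this header */
	/* pr_datasz payload bytes, padded to ELF_GNU_PROPERTY_ALIGN */
};
```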
Diffstat (limited to 'fs/binfmt_elf.c')
-rw-r--r--	fs/binfmt_elf.c	145
1 file changed, 139 insertions(+), 6 deletions(-)
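The hunks below add only the generic plumbing; what a given property actually does is delegated to two architecture hooks whose call signatures can be read straight off the diff: arch_parse_elf_property(), invoked once per record, and arch_elf_adjust_prot(), invoked from make_prot() for every PT_LOAD mapping. As a rough orientation, here is a simplified sketch of how the arm64 patches in this pull use those hooks to wire up BTI. It is condensed from memory rather than copied from the tree, so identifiers such as ARM64_ELF_BTI, PROT_BTI, GNU_PROPERTY_AARCH64_FEATURE_1_AND and system_supports_bti() belong to the arm64 side of the series, not to fs/binfmt_elf.c.

```c
/*
 * Simplified sketch of the arm64 consumers of the new hooks; not the
 * verbatim kernel code, just the shape of it.
 */

/* Called by parse_elf_property() for every record in the note. */
static inline int arch_parse_elf_property(u32 type, const void *data,
					  size_t datasz, bool compat,
					  struct arch_elf_state *arch)
{
	if (compat)
		return 0;		/* no known properties for AArch32 */

	if (type == GNU_PROPERTY_AARCH64_FEATURE_1_AND) {
		const u32 *features = data;

		if (datasz != sizeof(*features))
			return -ENOEXEC;	/* malformed record */

		if (system_supports_bti() &&
		    (*features & GNU_PROPERTY_AARCH64_FEATURE_1_BTI))
			arch->flags |= ARM64_ELF_BTI;	/* picked up by make_prot() */
	}

	return 0;			/* unknown property types are ignored */
}

/* Called by make_prot() with the (has_interp, is_interp) pair added below. */
static inline int arch_elf_adjust_prot(int prot,
				       const struct arch_elf_state *state,
				       bool has_interp, bool is_interp)
{
	/*
	 * For dynamically linked binaries the dynamic linker applies
	 * PROT_BTI itself via mprotect(), so the kernel only adds it when
	 * mapping the interpreter or a static executable.
	 */
	if (is_interp != has_interp)
		return prot;

	if ((state->flags & ARM64_ELF_BTI) && (prot & PROT_EXEC))
		prot |= PROT_BTI;

	return prot;
}
```

That split is what the new has_interp/is_interp arguments to make_prot() express: load_elf_interp() passes (true, true), while the main executable's segments are mapped with (!!interpreter, false).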
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 25d489bc9453..92402c5606a1 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -40,12 +40,18 @@
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
+#ifndef ELF_COMPAT
+#define ELF_COMPAT 0
+#endif
+
#ifndef user_long_t
#define user_long_t long
#endif
@@ -539,7 +545,8 @@ static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
-static inline int make_prot(u32 p_flags)
+static inline int make_prot(u32 p_flags, struct arch_elf_state *arch_state,
+ bool has_interp, bool is_interp)
{
int prot = 0;
@@ -549,7 +556,8 @@ static inline int make_prot(u32 p_flags)
prot |= PROT_WRITE;
if (p_flags & PF_X)
prot |= PROT_EXEC;
- return prot;
+
+ return arch_elf_adjust_prot(prot, arch_state, has_interp, is_interp);
}
/* This is much more generalized than the library routine read function,
@@ -559,7 +567,8 @@ static inline int make_prot(u32 p_flags)
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
struct file *interpreter,
- unsigned long no_base, struct elf_phdr *interp_elf_phdata)
+ unsigned long no_base, struct elf_phdr *interp_elf_phdata,
+ struct arch_elf_state *arch_state)
{
struct elf_phdr *eppnt;
unsigned long load_addr = 0;
@@ -591,7 +600,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
if (eppnt->p_type == PT_LOAD) {
int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
- int elf_prot = make_prot(eppnt->p_flags);
+ int elf_prot = make_prot(eppnt->p_flags, arch_state,
+ true, true);
unsigned long vaddr = 0;
unsigned long k, map_addr;
@@ -682,6 +692,111 @@ out:
* libraries. There is no binary dependent code anywhere else.
*/
+static int parse_elf_property(const char *data, size_t *off, size_t datasz,
+ struct arch_elf_state *arch,
+ bool have_prev_type, u32 *prev_type)
+{
+ size_t o, step;
+ const struct gnu_property *pr;
+ int ret;
+
+ if (*off == datasz)
+ return -ENOENT;
+
+ if (WARN_ON_ONCE(*off > datasz || *off % ELF_GNU_PROPERTY_ALIGN))
+ return -EIO;
+ o = *off;
+ datasz -= *off;
+
+ if (datasz < sizeof(*pr))
+ return -ENOEXEC;
+ pr = (const struct gnu_property *)(data + o);
+ o += sizeof(*pr);
+ datasz -= sizeof(*pr);
+
+ if (pr->pr_datasz > datasz)
+ return -ENOEXEC;
+
+ WARN_ON_ONCE(o % ELF_GNU_PROPERTY_ALIGN);
+ step = round_up(pr->pr_datasz, ELF_GNU_PROPERTY_ALIGN);
+ if (step > datasz)
+ return -ENOEXEC;
+
+ /* Properties are supposed to be unique and sorted on pr_type: */
+ if (have_prev_type && pr->pr_type <= *prev_type)
+ return -ENOEXEC;
+ *prev_type = pr->pr_type;
+
+ ret = arch_parse_elf_property(pr->pr_type, data + o,
+ pr->pr_datasz, ELF_COMPAT, arch);
+ if (ret)
+ return ret;
+
+ *off = o + step;
+ return 0;
+}
+
+#define NOTE_DATA_SZ SZ_1K
+#define GNU_PROPERTY_TYPE_0_NAME "GNU"
+#define NOTE_NAME_SZ (sizeof(GNU_PROPERTY_TYPE_0_NAME))
+
+static int parse_elf_properties(struct file *f, const struct elf_phdr *phdr,
+ struct arch_elf_state *arch)
+{
+ union {
+ struct elf_note nhdr;
+ char data[NOTE_DATA_SZ];
+ } note;
+ loff_t pos;
+ ssize_t n;
+ size_t off, datasz;
+ int ret;
+ bool have_prev_type;
+ u32 prev_type;
+
+ if (!IS_ENABLED(CONFIG_ARCH_USE_GNU_PROPERTY) || !phdr)
+ return 0;
+
+ /* load_elf_binary() shouldn't call us unless this is true... */
+ if (WARN_ON_ONCE(phdr->p_type != PT_GNU_PROPERTY))
+ return -ENOEXEC;
+
+ /* If the properties are crazy large, that's too bad (for now): */
+ if (phdr->p_filesz > sizeof(note))
+ return -ENOEXEC;
+
+ pos = phdr->p_offset;
+ n = kernel_read(f, &note, phdr->p_filesz, &pos);
+
+ BUILD_BUG_ON(sizeof(note) < sizeof(note.nhdr) + NOTE_NAME_SZ);
+ if (n < 0 || n < sizeof(note.nhdr) + NOTE_NAME_SZ)
+ return -EIO;
+
+ if (note.nhdr.n_type != NT_GNU_PROPERTY_TYPE_0 ||
+ note.nhdr.n_namesz != NOTE_NAME_SZ ||
+ strncmp(note.data + sizeof(note.nhdr),
+ GNU_PROPERTY_TYPE_0_NAME, n - sizeof(note.nhdr)))
+ return -ENOEXEC;
+
+ off = round_up(sizeof(note.nhdr) + NOTE_NAME_SZ,
+ ELF_GNU_PROPERTY_ALIGN);
+ if (off > n)
+ return -ENOEXEC;
+
+ if (note.nhdr.n_descsz > n - off)
+ return -ENOEXEC;
+ datasz = off + note.nhdr.n_descsz;
+
+ have_prev_type = false;
+ do {
+ ret = parse_elf_property(note.data, &off, datasz, arch,
+ have_prev_type, &prev_type);
+ have_prev_type = true;
+ } while (!ret);
+
+ return ret == -ENOENT ? 0 : ret;
+}
+
static int load_elf_binary(struct linux_binprm *bprm)
{
struct file *interpreter = NULL; /* to shut gcc up */
@@ -689,6 +804,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
int load_addr_set = 0;
unsigned long error;
struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
+ struct elf_phdr *elf_property_phdata = NULL;
unsigned long elf_bss, elf_brk;
int bss_prot = 0;
int retval, i;
@@ -726,6 +842,11 @@ static int load_elf_binary(struct linux_binprm *bprm)
for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
char *elf_interpreter;
+ if (elf_ppnt->p_type == PT_GNU_PROPERTY) {
+ elf_property_phdata = elf_ppnt;
+ continue;
+ }
+
if (elf_ppnt->p_type != PT_INTERP)
continue;
@@ -819,9 +940,14 @@ out_free_interp:
goto out_free_dentry;
/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
+ elf_property_phdata = NULL;
elf_ppnt = interp_elf_phdata;
for (i = 0; i < interp_elf_ex->e_phnum; i++, elf_ppnt++)
switch (elf_ppnt->p_type) {
+ case PT_GNU_PROPERTY:
+ elf_property_phdata = elf_ppnt;
+ break;
+
case PT_LOPROC ... PT_HIPROC:
retval = arch_elf_pt_proc(interp_elf_ex,
elf_ppnt, interpreter,
@@ -832,6 +958,11 @@ out_free_interp:
}
}
+ retval = parse_elf_properties(interpreter ?: bprm->file,
+ elf_property_phdata, &arch_state);
+ if (retval)
+ goto out_free_dentry;
+
/*
* Allow arch code to reject the ELF at this point, whilst it's
* still possible to return an error to the code that invoked
@@ -913,7 +1044,8 @@ out_free_interp:
}
}
- elf_prot = make_prot(elf_ppnt->p_flags);
+ elf_prot = make_prot(elf_ppnt->p_flags, &arch_state,
+ !!interpreter, false);
elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;
@@ -1056,7 +1188,8 @@ out_free_interp:
if (interpreter) {
elf_entry = load_elf_interp(interp_elf_ex,
interpreter,
- load_bias, interp_elf_phdata);
+ load_bias, interp_elf_phdata,
+ &arch_state);
if (!IS_ERR((void *)elf_entry)) {
/*
* load_elf_interp() returns relocation