author | Arnd Bergmann <arnd@arndb.de> | 2012-10-04 22:57:00 +0200
committer | Arnd Bergmann <arnd@arndb.de> | 2012-10-04 22:57:51 +0200
commit | c37d6154c0b9163c27e53cc1d0be3867b4abd760 (patch)
tree | 7a24522c56d1cb284dff1d3c225bbdaba0901bb5 /arch/arm64/kernel
parent | asm-generic: Add default clkdev.h (diff)
parent | UAPI: (Scripted) Disintegrate include/asm-generic (diff)
Merge branch 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers into asm-generic
Patches from David Howells <dhowells@redhat.com>:
This is to complete part of the UAPI disintegration for which the
preparatory patches were pulled recently.
Note that there are some fixup patches at the base of the branch aimed at you;
all arches also get the asm-generic branch merged in.
* 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers:
UAPI: (Scripted) Disintegrate include/asm-generic
UAPI: Fix conditional header installation handling (notably kvm_para.h on m68k)
c6x: remove c6x signal.h
UAPI: Split compound conditionals containing __KERNEL__ in Arm64
UAPI: Fix the guards on various asm/unistd.h files
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
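To make the disintegration concrete: the scripted conversion splits each exported header in two, moving the userspace-visible definitions under include/uapi/ and leaving a kernel-side header that pulls them back in. A minimal sketch, assuming a hypothetical asm-generic header foo.h (not one of the files in this series):

	/* Before the split, include/asm-generic/foo.h mixes ABI and internals: */
	#define FOO_USER_FLAG	0x1	/* part of the userspace ABI */
	#ifdef __KERNEL__
	#define FOO_KERNEL_ONLY	0x2	/* kernel-internal, never exported */
	#endif

	/* After the split, include/uapi/asm-generic/foo.h carries only the ABI: */
	#define FOO_USER_FLAG	0x1

	/* ...and include/asm-generic/foo.h becomes a kernel-side wrapper: */
	#include <uapi/asm-generic/foo.h>
	#define FOO_KERNEL_ONLY	0x2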
Diffstat (limited to 'arch/arm64/kernel')
38 files changed, 10517 insertions, 0 deletions
diff --git a/arch/arm64/kernel/.gitignore b/arch/arm64/kernel/.gitignore
new file mode 100644
index 000000000000..c5f676c3c224
--- /dev/null
+++ b/arch/arm64/kernel/.gitignore
@@ -0,0 +1 @@
+vmlinux.lds
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
new file mode 100644
index 000000000000..e2caff1b812a
--- /dev/null
+++ b/arch/arm64/kernel/Makefile
@@ -0,0 +1,27 @@
+#
+# Makefile for the linux kernel.
+#
+
+CPPFLAGS_vmlinux.lds	:= -DTEXT_OFFSET=$(TEXT_OFFSET)
+AFLAGS_head.o		:= -DTEXT_OFFSET=$(TEXT_OFFSET)
+
+# Object file lists.
+arm64-obj-y		:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
+			   entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
+			   sys.o stacktrace.o time.o traps.o io.o vdso.o
+
+arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o		\
+					   sys_compat.o
+arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
+arm64-obj-$(CONFIG_SMP)			+= smp.o
+arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
+arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)+= hw_breakpoint.o
+
+obj-y					+= $(arm64-obj-y) vdso/
+obj-m					+= $(arm64-obj-m)
+head-y					:= head.o
+extra-y					:= $(head-y) vmlinux.lds
+
+# vDSO - this must be built first to generate the symbol offsets
+$(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h
+$(obj)/vdso/vdso-offsets.h: $(obj)/vdso
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
new file mode 100644
index 000000000000..cef3925eaf60
--- /dev/null
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -0,0 +1,46 @@
+/*
+ * Based on arch/arm/kernel/armksyms.c
+ *
+ * Copyright (C) 2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/cryptohash.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+
+#include <asm/checksum.h>
+
+	/* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+EXPORT_SYMBOL(copy_page);
+
+EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__clear_user);
+
+	/* bitops */
+EXPORT_SYMBOL(__atomic_hash);
+
+	/* physical memory */
+EXPORT_SYMBOL(memstart_addr);
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
new file mode 100644
index 000000000000..a2a4d810bea3
--- /dev/null
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -0,0 +1,108 @@
+/*
+ * Based on arch/arm/kernel/asm-offsets.c
+ *
+ * Copyright (C) 1995-2003 Russell King
+ *               2001-2002 Keith Owens
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/dma-mapping.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/cputable.h>
+#include <asm/vdso_datapage.h>
+#include <linux/kbuild.h>
+
+int main(void)
+{
+  DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
+  BLANK();
+  DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
+  DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
+  DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
+  DEFINE(TI_TASK,		offsetof(struct thread_info, task));
+  DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
+  DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
+  BLANK();
+  DEFINE(THREAD_CPU_CONTEXT,	offsetof(struct task_struct, thread.cpu_context));
+  BLANK();
+  DEFINE(S_X0,			offsetof(struct pt_regs, regs[0]));
+  DEFINE(S_X1,			offsetof(struct pt_regs, regs[1]));
+  DEFINE(S_X2,			offsetof(struct pt_regs, regs[2]));
+  DEFINE(S_X3,			offsetof(struct pt_regs, regs[3]));
+  DEFINE(S_X4,			offsetof(struct pt_regs, regs[4]));
+  DEFINE(S_X5,			offsetof(struct pt_regs, regs[5]));
+  DEFINE(S_X6,			offsetof(struct pt_regs, regs[6]));
+  DEFINE(S_X7,			offsetof(struct pt_regs, regs[7]));
+  DEFINE(S_LR,			offsetof(struct pt_regs, regs[30]));
+  DEFINE(S_SP,			offsetof(struct pt_regs, sp));
+#ifdef CONFIG_COMPAT
+  DEFINE(S_COMPAT_SP,		offsetof(struct pt_regs, compat_sp));
+#endif
+  DEFINE(S_PSTATE,		offsetof(struct pt_regs, pstate));
+  DEFINE(S_PC,			offsetof(struct pt_regs, pc));
+  DEFINE(S_ORIG_X0,		offsetof(struct pt_regs, orig_x0));
+  DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
+  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
+  BLANK();
+  DEFINE(MM_CONTEXT_ID,		offsetof(struct mm_struct, context.id));
+  BLANK();
+  DEFINE(VMA_VM_MM,		offsetof(struct vm_area_struct, vm_mm));
+  DEFINE(VMA_VM_FLAGS,		offsetof(struct vm_area_struct, vm_flags));
+  BLANK();
+  DEFINE(VM_EXEC,		VM_EXEC);
+  BLANK();
+  DEFINE(PAGE_SZ,		PAGE_SIZE);
+  BLANK();
+  DEFINE(CPU_INFO_SZ,		sizeof(struct cpu_info));
+  DEFINE(CPU_INFO_SETUP,	offsetof(struct cpu_info, cpu_setup));
+  BLANK();
+  DEFINE(DMA_BIDIRECTIONAL,	DMA_BIDIRECTIONAL);
+  DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
+  DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
+  BLANK();
+  DEFINE(CLOCK_REALTIME,	CLOCK_REALTIME);
+  DEFINE(CLOCK_MONOTONIC,	CLOCK_MONOTONIC);
+  DEFINE(CLOCK_REALTIME_RES,	MONOTONIC_RES_NSEC);
+  DEFINE(CLOCK_REALTIME_COARSE,	CLOCK_REALTIME_COARSE);
+  DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
+  DEFINE(CLOCK_COARSE_RES,	LOW_RES_NSEC);
+  DEFINE(NSEC_PER_SEC,		NSEC_PER_SEC);
+  BLANK();
+  DEFINE(VDSO_CS_CYCLE_LAST,	offsetof(struct vdso_data, cs_cycle_last));
+  DEFINE(VDSO_XTIME_CLK_SEC,	offsetof(struct vdso_data, xtime_clock_sec));
+  DEFINE(VDSO_XTIME_CLK_NSEC,	offsetof(struct vdso_data, xtime_clock_nsec));
+  DEFINE(VDSO_XTIME_CRS_SEC,	offsetof(struct vdso_data, xtime_coarse_sec));
+  DEFINE(VDSO_XTIME_CRS_NSEC,	offsetof(struct vdso_data, xtime_coarse_nsec));
+  DEFINE(VDSO_WTM_CLK_SEC,	offsetof(struct vdso_data, wtm_clock_sec));
+  DEFINE(VDSO_WTM_CLK_NSEC,	offsetof(struct vdso_data, wtm_clock_nsec));
+  DEFINE(VDSO_TB_SEQ_COUNT,	offsetof(struct vdso_data, tb_seq_count));
+  DEFINE(VDSO_CS_MULT,		offsetof(struct vdso_data, cs_mult));
+  DEFINE(VDSO_CS_SHIFT,		offsetof(struct vdso_data, cs_shift));
+  DEFINE(VDSO_TZ_MINWEST,	offsetof(struct vdso_data, tz_minuteswest));
+  DEFINE(VDSO_TZ_DSTTIME,	offsetof(struct vdso_data, tz_dsttime));
+  DEFINE(VDSO_USE_SYSCALL,	offsetof(struct vdso_data, use_syscall));
+  BLANK();
+  DEFINE(TVAL_TV_SEC,		offsetof(struct timeval, tv_sec));
+  DEFINE(TVAL_TV_USEC,		offsetof(struct timeval, tv_usec));
+  DEFINE(TSPEC_TV_SEC,		offsetof(struct timespec, tv_sec));
+  DEFINE(TSPEC_TV_NSEC,		offsetof(struct timespec, tv_nsec));
+  BLANK();
+  DEFINE(TZ_MINWEST,		offsetof(struct timezone, tz_minuteswest));
+  DEFINE(TZ_DSTTIME,		offsetof(struct timezone, tz_dsttime));
+  return 0;
+}
diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c
new file mode 100644
index 000000000000..63cfc4a43f4e
--- /dev/null
+++ b/arch/arm64/kernel/cputable.c
@@ -0,0 +1,33 @@
+/*
+ * arch/arm64/kernel/cputable.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+
+#include <asm/cputable.h>
+
+extern unsigned long __cpu_setup(void);
+
+struct cpu_info __initdata cpu_table[] = {
+	{
+		.cpu_id_val	= 0x000f0000,
+		.cpu_id_mask	= 0x000f0000,
+		.cpu_name	= "AArch64 Processor",
+		.cpu_setup	= __cpu_setup,
+	},
+	{ /* Empty */ },
+};
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
new file mode 100644
index 000000000000..0c3ba9f51376
--- /dev/null
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -0,0 +1,288 @@
+/*
+ * ARMv8 single-step debug support and mdscr context switching.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/debugfs.h>
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/ptrace.h>
+#include <linux/stat.h>
+
+#include <asm/debug-monitors.h>
+#include <asm/local.h>
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+
+/* Low-level stepping controls. */
+#define DBG_MDSCR_SS		(1 << 0)
+#define DBG_SPSR_SS		(1 << 21)
+
+/* MDSCR_EL1 enabling bits */
+#define DBG_MDSCR_KDE		(1 << 13)
+#define DBG_MDSCR_MDE		(1 << 15)
+#define DBG_MDSCR_MASK		~(DBG_MDSCR_KDE | DBG_MDSCR_MDE)
+
+/* Determine debug architecture. */
+u8 debug_monitors_arch(void)
+{
+	return read_cpuid(ID_AA64DFR0_EL1) & 0xf;
+}
+
+/*
+ * MDSCR access routines.
+ */
+static void mdscr_write(u32 mdscr)
+{
+	unsigned long flags;
+	local_dbg_save(flags);
+	asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
+	local_dbg_restore(flags);
+}
+
+static u32 mdscr_read(void)
+{
+	u32 mdscr;
+	asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
+	return mdscr;
+}
+
+/*
+ * Allow root to disable self-hosted debug from userspace.
+ * This is useful if you want to connect an external JTAG debugger.
+ */
+static u32 debug_enabled = 1;
+
+static int create_debug_debugfs_entry(void)
+{
+	debugfs_create_bool("debug_enabled", 0644, NULL, &debug_enabled);
+	return 0;
+}
+fs_initcall(create_debug_debugfs_entry);
+
+static int __init early_debug_disable(char *buf)
+{
+	debug_enabled = 0;
+	return 0;
+}
+
+early_param("nodebugmon", early_debug_disable);
+
+/*
+ * Keep track of debug users on each core.
+ * The ref counts are per-cpu so we use a local_t type.
+ */
+static DEFINE_PER_CPU(local_t, mde_ref_count);
+static DEFINE_PER_CPU(local_t, kde_ref_count);
+
+void enable_debug_monitors(enum debug_el el)
+{
+	u32 mdscr, enable = 0;
+
+	WARN_ON(preemptible());
+
+	if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1)
+		enable = DBG_MDSCR_MDE;
+
+	if (el == DBG_ACTIVE_EL1 &&
+	    local_inc_return(&__get_cpu_var(kde_ref_count)) == 1)
+		enable |= DBG_MDSCR_KDE;
+
+	if (enable && debug_enabled) {
+		mdscr = mdscr_read();
+		mdscr |= enable;
+		mdscr_write(mdscr);
+	}
+}
+
+void disable_debug_monitors(enum debug_el el)
+{
+	u32 mdscr, disable = 0;
+
+	WARN_ON(preemptible());
+
+	if (local_dec_and_test(&__get_cpu_var(mde_ref_count)))
+		disable = ~DBG_MDSCR_MDE;
+
+	if (el == DBG_ACTIVE_EL1 &&
+	    local_dec_and_test(&__get_cpu_var(kde_ref_count)))
+		disable &= ~DBG_MDSCR_KDE;
+
+	if (disable) {
+		mdscr = mdscr_read();
+		mdscr &= disable;
+		mdscr_write(mdscr);
+	}
+}
+
+/*
+ * OS lock clearing.
+ */
+static void clear_os_lock(void *unused)
+{
+	asm volatile("msr mdscr_el1, %0" : : "r" (0));
+	isb();
+	asm volatile("msr oslar_el1, %0" : : "r" (0));
+	isb();
+}
+
+static int __cpuinit os_lock_notify(struct notifier_block *self,
+				    unsigned long action, void *data)
+{
+	int cpu = (unsigned long)data;
+	if (action == CPU_ONLINE)
+		smp_call_function_single(cpu, clear_os_lock, NULL, 1);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata os_lock_nb = {
+	.notifier_call = os_lock_notify,
+};
+
+static int __cpuinit debug_monitors_init(void)
+{
+	/* Clear the OS lock. */
+	smp_call_function(clear_os_lock, NULL, 1);
+	clear_os_lock(NULL);
+
+	/* Register hotplug handler. */
+	register_cpu_notifier(&os_lock_nb);
+	return 0;
+}
+postcore_initcall(debug_monitors_init);
+
+/*
+ * Single step API and exception handling.
+ */
+static void set_regs_spsr_ss(struct pt_regs *regs)
+{
+	unsigned long spsr;
+
+	spsr = regs->pstate;
+	spsr &= ~DBG_SPSR_SS;
+	spsr |= DBG_SPSR_SS;
+	regs->pstate = spsr;
+}
+
+static void clear_regs_spsr_ss(struct pt_regs *regs)
+{
+	unsigned long spsr;
+
+	spsr = regs->pstate;
+	spsr &= ~DBG_SPSR_SS;
+	regs->pstate = spsr;
+}
+
+static int single_step_handler(unsigned long addr, unsigned int esr,
+			       struct pt_regs *regs)
+{
+	siginfo_t info;
+
+	/*
+	 * If we are stepping a pending breakpoint, call the hw_breakpoint
+	 * handler first.
+	 */
+	if (!reinstall_suspended_bps(regs))
+		return 0;
+
+	if (user_mode(regs)) {
+		info.si_signo = SIGTRAP;
+		info.si_errno = 0;
+		info.si_code  = TRAP_HWBKPT;
+		info.si_addr  = (void __user *)instruction_pointer(regs);
+		force_sig_info(SIGTRAP, &info, current);
+
+		/*
+		 * ptrace will disable single step unless explicitly
+		 * asked to re-enable it. For other clients, it makes
+		 * sense to leave it enabled (i.e. rewind the controls
+		 * to the active-not-pending state).
+		 */
+		user_rewind_single_step(current);
+	} else {
+		/* TODO: route to KGDB */
+		pr_warning("Unexpected kernel single-step exception at EL1\n");
+		/*
+		 * Re-enable stepping since we know that we will be
+		 * returning to regs.
+		 */
+		set_regs_spsr_ss(regs);
+	}
+
+	return 0;
+}
+
+static int __init single_step_init(void)
+{
+	hook_debug_fault_code(DBG_ESR_EVT_HWSS, single_step_handler, SIGTRAP,
+			      TRAP_HWBKPT, "single-step handler");
+	return 0;
+}
+arch_initcall(single_step_init);
+
+/* Re-enable single step for syscall restarting. */
+void user_rewind_single_step(struct task_struct *task)
+{
+	/*
+	 * If single step is active for this thread, then set SPSR.SS
+	 * to 1 to avoid returning to the active-pending state.
+	 */
+	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+		set_regs_spsr_ss(task_pt_regs(task));
+}
+
+void user_fastforward_single_step(struct task_struct *task)
+{
+	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
+		clear_regs_spsr_ss(task_pt_regs(task));
+}
+
+/* Kernel API */
+void kernel_enable_single_step(struct pt_regs *regs)
+{
+	WARN_ON(!irqs_disabled());
+	set_regs_spsr_ss(regs);
+	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
+	enable_debug_monitors(DBG_ACTIVE_EL1);
+}
+
+void kernel_disable_single_step(void)
+{
+	WARN_ON(!irqs_disabled());
+	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
+	disable_debug_monitors(DBG_ACTIVE_EL1);
+}
+
+int kernel_active_single_step(void)
+{
+	WARN_ON(!irqs_disabled());
+	return mdscr_read() & DBG_MDSCR_SS;
+}
+
+/* ptrace API */
+void user_enable_single_step(struct task_struct *task)
+{
+	set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
+	set_regs_spsr_ss(task_pt_regs(task));
+}
+
+void user_disable_single_step(struct task_struct *task)
+{
+	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
+}
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S
new file mode 100644
index 000000000000..17988a6e7ea2
--- /dev/null
+++ b/arch/arm64/kernel/entry-fpsimd.S
@@ -0,0 +1,80 @@
+/*
+ * FP/SIMD state saving and restoring
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+
+/*
+ * Save the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+ENTRY(fpsimd_save_state)
+	stp	q0, q1, [x0, #16 * 0]
+	stp	q2, q3, [x0, #16 * 2]
+	stp	q4, q5, [x0, #16 * 4]
+	stp	q6, q7, [x0, #16 * 6]
+	stp	q8, q9, [x0, #16 * 8]
+	stp	q10, q11, [x0, #16 * 10]
+	stp	q12, q13, [x0, #16 * 12]
+	stp	q14, q15, [x0, #16 * 14]
+	stp	q16, q17, [x0, #16 * 16]
+	stp	q18, q19, [x0, #16 * 18]
+	stp	q20, q21, [x0, #16 * 20]
+	stp	q22, q23, [x0, #16 * 22]
+	stp	q24, q25, [x0, #16 * 24]
+	stp	q26, q27, [x0, #16 * 26]
+	stp	q28, q29, [x0, #16 * 28]
+	stp	q30, q31, [x0, #16 * 30]!
+	mrs	x8, fpsr
+	str	w8, [x0, #16 * 2]
+	mrs	x8, fpcr
+	str	w8, [x0, #16 * 2 + 4]
+	ret
+ENDPROC(fpsimd_save_state)
+
+/*
+ * Load the FP registers.
+ *
+ * x0 - pointer to struct fpsimd_state
+ */
+ENTRY(fpsimd_load_state)
+	ldp	q0, q1, [x0, #16 * 0]
+	ldp	q2, q3, [x0, #16 * 2]
+	ldp	q4, q5, [x0, #16 * 4]
+	ldp	q6, q7, [x0, #16 * 6]
+	ldp	q8, q9, [x0, #16 * 8]
+	ldp	q10, q11, [x0, #16 * 10]
+	ldp	q12, q13, [x0, #16 * 12]
+	ldp	q14, q15, [x0, #16 * 14]
+	ldp	q16, q17, [x0, #16 * 16]
+	ldp	q18, q19, [x0, #16 * 18]
+	ldp	q20, q21, [x0, #16 * 20]
+	ldp	q22, q23, [x0, #16 * 22]
+	ldp	q24, q25, [x0, #16 * 24]
+	ldp	q26, q27, [x0, #16 * 26]
+	ldp	q28, q29, [x0, #16 * 28]
+	ldp	q30, q31, [x0, #16 * 30]!
+	ldr	w8, [x0, #16 * 2]
+	ldr	w9, [x0, #16 * 2 + 4]
+	msr	fpsr, x8
+	msr	fpcr, x9
+	ret
+ENDPROC(fpsimd_load_state)
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
new file mode 100644
index 000000000000..38cf853a3667
--- /dev/null
+++ b/arch/arm64/kernel/entry.S
@@ -0,0 +1,695 @@
+/*
+ * Low-level exception handling code
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Authors:	Catalin Marinas <catalin.marinas@arm.com>
+ *		Will Deacon <will.deacon@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+/*
+ * Bad Abort numbers
+ *-----------------
+ */
+#define BAD_SYNC	0
+#define BAD_IRQ		1
+#define BAD_FIQ		2
+#define BAD_ERROR	3
+
+	.macro	kernel_entry, el, regsize = 64
+	sub	sp, sp, #S_FRAME_SIZE - S_LR	// room for LR, SP, SPSR, ELR
+	.if	\regsize == 32
+	mov	w0, w0				// zero upper 32 bits of x0
+	.endif
+	push	x28, x29
+	push	x26, x27
+	push	x24, x25
+	push	x22, x23
+	push	x20, x21
+	push	x18, x19
+	push	x16, x17
+	push	x14, x15
+	push	x12, x13
+	push	x10, x11
+	push	x8, x9
+	push	x6, x7
+	push	x4, x5
+	push	x2, x3
+	push	x0, x1
+	.if	\el == 0
+	mrs	x21, sp_el0
+	.else
+	add	x21, sp, #S_FRAME_SIZE
+	.endif
+	mrs	x22, elr_el1
+	mrs	x23, spsr_el1
+	stp	lr, x21, [sp, #S_LR]
+	stp	x22, x23, [sp, #S_PC]
+
+	/*
+	 * Set syscallno to -1 by default (overridden later if real syscall).
+	 */
+	.if	\el == 0
+	mvn	x21, xzr
+	str	x21, [sp, #S_SYSCALLNO]
+	.endif
+
+	/*
+	 * Registers that may be useful after this macro is invoked:
+	 *
+	 * x21 - aborted SP
+	 * x22 - aborted PC
+	 * x23 - aborted PSTATE
+	 */
+	.endm
+
+	.macro	kernel_exit, el, ret = 0
+	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
+	.if	\el == 0
+	ldr	x23, [sp, #S_SP]		// load return stack pointer
+	.endif
+	.if	\ret
+	ldr	x1, [sp, #S_X1]			// preserve x0 (syscall return)
+	add	sp, sp, S_X2
+	.else
+	pop	x0, x1
+	.endif
+	pop	x2, x3				// load the rest of the registers
+	pop	x4, x5
+	pop	x6, x7
+	pop	x8, x9
+	msr	elr_el1, x21			// set up the return data
+	msr	spsr_el1, x22
+	.if	\el == 0
+	msr	sp_el0, x23
+	.endif
+	pop	x10, x11
+	pop	x12, x13
+	pop	x14, x15
+	pop	x16, x17
+	pop	x18, x19
+	pop	x20, x21
+	pop	x22, x23
+	pop	x24, x25
+	pop	x26, x27
+	pop	x28, x29
+	ldr	lr, [sp], #S_FRAME_SIZE - S_LR	// load LR and restore SP
+	eret					// return to kernel
+	.endm
+
+	.macro	get_thread_info, rd
+	mov	\rd, sp
+	and	\rd, \rd, #~((1 << 13) - 1)	// top of 8K stack
+	.endm
+
+/*
+ * These are the registers used in the syscall handler, and allow us to
+ * have in theory up to 7 arguments to a function - x0 to x6.
+ *
+ * x7 is reserved for the system call number in 32-bit mode.
+ */
+sc_nr	.req	x25		// number of system calls
+scno	.req	x26		// syscall number
+stbl	.req	x27		// syscall table pointer
+tsk	.req	x28		// current thread_info
+
+/*
+ * Interrupt handling.
+ */
+	.macro	irq_handler
+	ldr	x1, handle_arch_irq
+	mov	x0, sp
+	blr	x1
+	.endm
+
+	.text
+
+/*
+ * Exception vectors.
+ */
+	.macro	ventry	label
+	.align	7
+	b	\label
+	.endm
+
+	.align	11
+ENTRY(vectors)
+	ventry	el1_sync_invalid		// Synchronous EL1t
+	ventry	el1_irq_invalid			// IRQ EL1t
+	ventry	el1_fiq_invalid			// FIQ EL1t
+	ventry	el1_error_invalid		// Error EL1t
+
+	ventry	el1_sync			// Synchronous EL1h
+	ventry	el1_irq				// IRQ EL1h
+	ventry	el1_fiq_invalid			// FIQ EL1h
+	ventry	el1_error_invalid		// Error EL1h
+
+	ventry	el0_sync			// Synchronous 64-bit EL0
+	ventry	el0_irq				// IRQ 64-bit EL0
+	ventry	el0_fiq_invalid			// FIQ 64-bit EL0
+	ventry	el0_error_invalid		// Error 64-bit EL0
+
+#ifdef CONFIG_COMPAT
+	ventry	el0_sync_compat			// Synchronous 32-bit EL0
+	ventry	el0_irq_compat			// IRQ 32-bit EL0
+	ventry	el0_fiq_invalid_compat		// FIQ 32-bit EL0
+	ventry	el0_error_invalid_compat	// Error 32-bit EL0
+#else
+	ventry	el0_sync_invalid		// Synchronous 32-bit EL0
+	ventry	el0_irq_invalid			// IRQ 32-bit EL0
+	ventry	el0_fiq_invalid			// FIQ 32-bit EL0
+	ventry	el0_error_invalid		// Error 32-bit EL0
+#endif
+END(vectors)
+
+/*
+ * Invalid mode handlers
+ */
+	.macro	inv_entry, el, reason, regsize = 64
+	kernel_entry el, \regsize
+	mov	x0, sp
+	mov	x1, #\reason
+	mrs	x2, esr_el1
+	b	bad_mode
+	.endm
+
+el0_sync_invalid:
+	inv_entry 0, BAD_SYNC
+ENDPROC(el0_sync_invalid)
+
+el0_irq_invalid:
+	inv_entry 0, BAD_IRQ
+ENDPROC(el0_irq_invalid)
+
+el0_fiq_invalid:
+	inv_entry 0, BAD_FIQ
+ENDPROC(el0_fiq_invalid)
+
+el0_error_invalid:
+	inv_entry 0, BAD_ERROR
+ENDPROC(el0_error_invalid)
+
+#ifdef CONFIG_COMPAT
+el0_fiq_invalid_compat:
+	inv_entry 0, BAD_FIQ, 32
+ENDPROC(el0_fiq_invalid_compat)
+
+el0_error_invalid_compat:
+	inv_entry 0, BAD_ERROR, 32
+ENDPROC(el0_error_invalid_compat)
+#endif
+
+el1_sync_invalid:
+	inv_entry 1, BAD_SYNC
+ENDPROC(el1_sync_invalid)
+
+el1_irq_invalid:
+	inv_entry 1, BAD_IRQ
+ENDPROC(el1_irq_invalid)
+
+el1_fiq_invalid:
+	inv_entry 1, BAD_FIQ
+ENDPROC(el1_fiq_invalid)
+
+el1_error_invalid:
+	inv_entry 1, BAD_ERROR
+ENDPROC(el1_error_invalid)
+
+/*
+ * EL1 mode handlers.
+ */
+	.align	6
+el1_sync:
+	kernel_entry 1
+	mrs	x1, esr_el1			// read the syndrome register
+	lsr	x24, x1, #26			// exception class
+	cmp	x24, #0x25			// data abort in EL1
+	b.eq	el1_da
+	cmp	x24, #0x18			// configurable trap
+	b.eq	el1_undef
+	cmp	x24, #0x26			// stack alignment exception
+	b.eq	el1_sp_pc
+	cmp	x24, #0x22			// pc alignment exception
+	b.eq	el1_sp_pc
+	cmp	x24, #0x00			// unknown exception in EL1
+	b.eq	el1_undef
+	cmp	x24, #0x30			// debug exception in EL1
+	b.ge	el1_dbg
+	b	el1_inv
+el1_da:
+	/*
+	 * Data abort handling
+	 */
+	mrs	x0, far_el1
+	enable_dbg_if_not_stepping x2
+	// re-enable interrupts if they were enabled in the aborted context
+	tbnz	x23, #7, 1f			// PSR_I_BIT
+	enable_irq
+1:
+	mov	x2, sp				// struct pt_regs
+	bl	do_mem_abort
+
+	// disable interrupts before pulling preserved data off the stack
+	disable_irq
+	kernel_exit 1
+el1_sp_pc:
+	/*
+	 * Stack or PC alignment exception handling
+	 */
+	mrs	x0, far_el1
+	mov	x1, x25
+	mov	x2, sp
+	b	do_sp_pc_abort
+el1_undef:
+	/*
+	 * Undefined instruction
+	 */
+	mov	x0, sp
+	b	do_undefinstr
+el1_dbg:
+	/*
+	 * Debug exception handling
+	 */
+	tbz	x24, #0, el1_inv		// EL1 only
+	mrs	x0, far_el1
+	mov	x2, sp				// struct pt_regs
+	bl	do_debug_exception
+
+	kernel_exit 1
+el1_inv:
+	// TODO: add support for undefined instructions in kernel mode
+	mov	x0, sp
+	mov	x1, #BAD_SYNC
+	mrs	x2, esr_el1
+	b	bad_mode
+ENDPROC(el1_sync)
+
+	.align	6
+el1_irq:
+	kernel_entry 1
+	enable_dbg_if_not_stepping x0
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
+#ifdef CONFIG_PREEMPT
+	get_thread_info tsk
+	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
+	add	x0, x24, #1			// increment it
+	str	x0, [tsk, #TI_PREEMPT]
+#endif
+	irq_handler
+#ifdef CONFIG_PREEMPT
+	str	x24, [tsk, #TI_PREEMPT]		// restore preempt count
+	cbnz	x24, 1f				// preempt count != 0
+	ldr	x0, [tsk, #TI_FLAGS]		// get flags
+	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
+	bl	el1_preempt
+1:
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_on
+#endif
+	kernel_exit 1
+ENDPROC(el1_irq)
+
+#ifdef CONFIG_PREEMPT
+el1_preempt:
+	mov	x24, lr
+1:	enable_dbg
+	bl	preempt_schedule_irq		// irq en/disable is done inside
+	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
+	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
+	ret	x24
+#endif
+
+/*
+ * EL0 mode handlers.
+ */
+	.align	6
+el0_sync:
+	kernel_entry 0
+	mrs	x25, esr_el1			// read the syndrome register
+	lsr	x24, x25, #26			// exception class
+	cmp	x24, #0x15			// SVC in 64-bit state
+	b.eq	el0_svc
+	adr	lr, ret_from_exception
+	cmp	x24, #0x24			// data abort in EL0
+	b.eq	el0_da
+	cmp	x24, #0x20			// instruction abort in EL0
+	b.eq	el0_ia
+	cmp	x24, #0x07			// FP/ASIMD access
+	b.eq	el0_fpsimd_acc
+	cmp	x24, #0x2c			// FP/ASIMD exception
+	b.eq	el0_fpsimd_exc
+	cmp	x24, #0x18			// configurable trap
+	b.eq	el0_undef
+	cmp	x24, #0x26			// stack alignment exception
+	b.eq	el0_sp_pc
+	cmp	x24, #0x22			// pc alignment exception
+	b.eq	el0_sp_pc
+	cmp	x24, #0x00			// unknown exception in EL0
+	b.eq	el0_undef
+	cmp	x24, #0x30			// debug exception in EL0
+	b.ge	el0_dbg
+	b	el0_inv
+
+#ifdef CONFIG_COMPAT
+	.align	6
+el0_sync_compat:
+	kernel_entry 0, 32
+	mrs	x25, esr_el1			// read the syndrome register
+	lsr	x24, x25, #26			// exception class
+	cmp	x24, #0x11			// SVC in 32-bit state
+	b.eq	el0_svc_compat
+	adr	lr, ret_from_exception
+	cmp	x24, #0x24			// data abort in EL0
+	b.eq	el0_da
+	cmp	x24, #0x20			// instruction abort in EL0
+	b.eq	el0_ia
+	cmp	x24, #0x07			// FP/ASIMD access
+	b.eq	el0_fpsimd_acc
+	cmp	x24, #0x28			// FP/ASIMD exception
+	b.eq	el0_fpsimd_exc
+	cmp	x24, #0x00			// unknown exception in EL0
+	b.eq	el0_undef
+	cmp	x24, #0x30			// debug exception in EL0
+	b.ge	el0_dbg
+	b	el0_inv
+el0_svc_compat:
+	/*
+	 * AArch32 syscall handling
+	 */
+	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
+	uxtw	scno, w7			// syscall number in w7 (r7)
+	mov	sc_nr, #__NR_compat_syscalls
+	b	el0_svc_naked
+
+	.align	6
+el0_irq_compat:
+	kernel_entry 0, 32
+	b	el0_irq_naked
+#endif
+
+el0_da:
+	/*
+	 * Data abort handling
+	 */
+	mrs	x0, far_el1
+	disable_step x1
+	isb
+	enable_dbg
+	// enable interrupts before calling the main handler
+	enable_irq
+	mov	x1, x25
+	mov	x2, sp
+	b	do_mem_abort
+el0_ia:
+	/*
+	 * Instruction abort handling
+	 */
+	mrs	x0, far_el1
+	disable_step x1
+	isb
+	enable_dbg
+	// enable interrupts before calling the main handler
+	enable_irq
+	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
+	mov	x2, sp
+	b	do_mem_abort
+el0_fpsimd_acc:
+	/*
+	 * Floating Point or Advanced SIMD access
+	 */
+	mov	x0, x25
+	mov	x1, sp
+	b	do_fpsimd_acc
+el0_fpsimd_exc:
+	/*
+	 * Floating Point or Advanced SIMD exception
+	 */
+	mov	x0, x25
+	mov	x1, sp
+	b	do_fpsimd_exc
+el0_sp_pc:
+	/*
+	 * Stack or PC alignment exception handling
+	 */
+	mrs	x0, far_el1
+	disable_step x1
+	isb
+	enable_dbg
+	// enable interrupts before calling the main handler
+	enable_irq
+	mov	x1, x25
+	mov	x2, sp
+	b	do_sp_pc_abort
+el0_undef:
+	/*
+	 * Undefined instruction
+	 */
+	mov	x0, sp
+	b	do_undefinstr
+el0_dbg:
+	/*
+	 * Debug exception handling
+	 */
+	tbnz	x24, #0, el0_inv		// EL0 only
+	mrs	x0, far_el1
+	disable_step x1
+	mov	x1, x25
+	mov	x2, sp
+	b	do_debug_exception
+el0_inv:
+	mov	x0, sp
+	mov	x1, #BAD_SYNC
+	mrs	x2, esr_el1
+	b	bad_mode
+ENDPROC(el0_sync)
+
+	.align	6
+el0_irq:
+	kernel_entry 0
+el0_irq_naked:
+	disable_step x1
+	isb
+	enable_dbg
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
+	get_thread_info tsk
+#ifdef CONFIG_PREEMPT
+	ldr	x24, [tsk, #TI_PREEMPT]		// get preempt count
+	add	x23, x24, #1			// increment it
+	str	x23, [tsk, #TI_PREEMPT]
+#endif
+	irq_handler
+#ifdef CONFIG_PREEMPT
+	ldr	x0, [tsk, #TI_PREEMPT]
+	str	x24, [tsk, #TI_PREEMPT]
+	cmp	x0, x23
+	b.eq	1f
+	mov	x1, #0
+	str	x1, [x1]			// BUG
+1:
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_on
+#endif
+	b	ret_to_user
+ENDPROC(el0_irq)
+
+/*
+ * This is the return code to user mode for abort handlers
+ */
+ret_from_exception:
+	get_thread_info tsk
+	b	ret_to_user
+ENDPROC(ret_from_exception)
+
+/*
+ * Register switch for AArch64. The callee-saved registers need to be saved
+ * and restored. On entry:
+ *   x0 = previous task_struct (must be preserved across the switch)
+ *   x1 = next task_struct
+ * Previous and next are guaranteed not to be the same.
+ *
+ */
+ENTRY(cpu_switch_to)
+	add	x8, x0, #THREAD_CPU_CONTEXT
+	mov	x9, sp
+	stp	x19, x20, [x8], #16		// store callee-saved registers
+	stp	x21, x22, [x8], #16
+	stp	x23, x24, [x8], #16
+	stp	x25, x26, [x8], #16
+	stp	x27, x28, [x8], #16
+	stp	x29, x9, [x8], #16
+	str	lr, [x8]
+	add	x8, x1, #THREAD_CPU_CONTEXT
+	ldp	x19, x20, [x8], #16		// restore callee-saved registers
+	ldp	x21, x22, [x8], #16
+	ldp	x23, x24, [x8], #16
+	ldp	x25, x26, [x8], #16
+	ldp	x27, x28, [x8], #16
+	ldp	x29, x9, [x8], #16
+	ldr	lr, [x8]
+	mov	sp, x9
+	ret
+ENDPROC(cpu_switch_to)
+
+/*
+ * This is the fast syscall return path. We do as little as possible here,
+ * and this includes saving x0 back into the kernel stack.
+ */
+ret_fast_syscall:
+	disable_irq				// disable interrupts
+	ldr	x1, [tsk, #TI_FLAGS]
+	and	x2, x1, #_TIF_WORK_MASK
+	cbnz	x2, fast_work_pending
+	tbz	x1, #TIF_SINGLESTEP, fast_exit
+	disable_dbg
+	enable_step x2
+fast_exit:
+	kernel_exit 0, ret = 1
+
+/*
+ * Ok, we need to do extra processing, enter the slow path.
+ */
+fast_work_pending:
+	str	x0, [sp, #S_X0]			// returned x0
+work_pending:
+	tbnz	x1, #TIF_NEED_RESCHED, work_resched
+	/* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
+	ldr	x2, [sp, #S_PSTATE]
+	mov	x0, sp				// 'regs'
+	tst	x2, #PSR_MODE_MASK		// user mode regs?
+	b.ne	no_work_pending			// returning to kernel
+	bl	do_notify_resume
+	b	ret_to_user
+work_resched:
+	enable_dbg
+	bl	schedule
+
+/*
+ * "slow" syscall return path.
+ */
+ENTRY(ret_to_user)
+	disable_irq				// disable interrupts
+	ldr	x1, [tsk, #TI_FLAGS]
+	and	x2, x1, #_TIF_WORK_MASK
+	cbnz	x2, work_pending
+	tbz	x1, #TIF_SINGLESTEP, no_work_pending
+	disable_dbg
+	enable_step x2
+no_work_pending:
+	kernel_exit 0, ret = 0
+ENDPROC(ret_to_user)
+
+/*
+ * This is how we return from a fork.
+ */
+ENTRY(ret_from_fork)
+	bl	schedule_tail
+	get_thread_info tsk
+	b	ret_to_user
+ENDPROC(ret_from_fork)
+
+/*
+ * SVC handler.
+ */
+	.align	6
+el0_svc:
+	adrp	stbl, sys_call_table		// load syscall table pointer
+	uxtw	scno, w8			// syscall number in w8
+	mov	sc_nr, #__NR_syscalls
+el0_svc_naked:					// compat entry point
+	stp	x0, scno, [sp, #S_ORIG_X0]	// save the original x0 and syscall number
+	disable_step x16
+	isb
+	enable_dbg
+	enable_irq
+
+	get_thread_info tsk
+	ldr	x16, [tsk, #TI_FLAGS]		// check for syscall tracing
+	tbnz	x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
+	adr	lr, ret_fast_syscall		// return address
+	cmp	scno, sc_nr			// check upper syscall limit
+	b.hs	ni_sys
+	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
+	br	x16				// call sys_* routine
+ni_sys:
+	mov	x0, sp
+	b	do_ni_syscall
+ENDPROC(el0_svc)
+
+	/*
+	 * This is the really slow path. We're going to be doing context
+	 * switches, and waiting for our parent to respond.
+	 */
+__sys_trace:
+	mov	x1, sp
+	mov	w0, #0				// trace entry
+	bl	syscall_trace
+	adr	lr, __sys_trace_return		// return address
+	uxtw	scno, w0			// syscall number (possibly new)
+	mov	x1, sp				// pointer to regs
+	cmp	scno, sc_nr			// check upper syscall limit
+	b.hs	ni_sys
+	ldp	x0, x1, [sp]			// restore the syscall args
+	ldp	x2, x3, [sp, #S_X2]
+	ldp	x4, x5, [sp, #S_X4]
+	ldp	x6, x7, [sp, #S_X6]
+	ldr	x16, [stbl, scno, lsl #3]	// address in the syscall table
+	br	x16				// call sys_* routine
+
+__sys_trace_return:
+	str	x0, [sp]			// save returned x0
+	mov	x1, sp
+	mov	w0, #1				// trace exit
+	bl	syscall_trace
+	b	ret_to_user
+
+/*
+ * Special system call wrappers.
+ */
+ENTRY(sys_execve_wrapper)
+	mov	x3, sp
+	b	sys_execve
+ENDPROC(sys_execve_wrapper)
+
+ENTRY(sys_clone_wrapper)
+	mov	x5, sp
+	b	sys_clone
+ENDPROC(sys_clone_wrapper)
+
+ENTRY(sys_rt_sigreturn_wrapper)
+	mov	x0, sp
+	b	sys_rt_sigreturn
+ENDPROC(sys_rt_sigreturn_wrapper)
+
+ENTRY(sys_sigaltstack_wrapper)
+	ldr	x2, [sp, #S_SP]
+	b	sys_sigaltstack
+ENDPROC(sys_sigaltstack_wrapper)
+
+ENTRY(handle_arch_irq)
+	.quad	0
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
new file mode 100644
index 000000000000..e8b8357aedb4
--- /dev/null
+++ b/arch/arm64/kernel/fpsimd.c
@@ -0,0 +1,106 @@
+/*
+ * FP/SIMD context switching and fault handling
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Author: Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+
+#include <asm/fpsimd.h>
+#include <asm/cputype.h>
+
+#define FPEXC_IOF	(1 << 0)
+#define FPEXC_DZF	(1 << 1)
+#define FPEXC_OFF	(1 << 2)
+#define FPEXC_UFF	(1 << 3)
+#define FPEXC_IXF	(1 << 4)
+#define FPEXC_IDF	(1 << 7)
+
+/*
+ * Trapped FP/ASIMD access.
+ */
+void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
+{
+	/* TODO: implement lazy context saving/restoring */
+	WARN_ON(1);
+}
+
+/*
+ * Raise a SIGFPE for the current process.
+ */
+void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
+{
+	siginfo_t info;
+	unsigned int si_code = 0;
+
+	if (esr & FPEXC_IOF)
+		si_code = FPE_FLTINV;
+	else if (esr & FPEXC_DZF)
+		si_code = FPE_FLTDIV;
+	else if (esr & FPEXC_OFF)
+		si_code = FPE_FLTOVF;
+	else if (esr & FPEXC_UFF)
+		si_code = FPE_FLTUND;
+	else if (esr & FPEXC_IXF)
+		si_code = FPE_FLTRES;
+
+	memset(&info, 0, sizeof(info));
+	info.si_signo = SIGFPE;
+	info.si_code = si_code;
+	info.si_addr = (void __user *)instruction_pointer(regs);
+
+	send_sig_info(SIGFPE, &info, current);
+}
+
+void fpsimd_thread_switch(struct task_struct *next)
+{
+	/* check if not kernel threads */
+	if (current->mm)
+		fpsimd_save_state(&current->thread.fpsimd_state);
+	if (next->mm)
+		fpsimd_load_state(&next->thread.fpsimd_state);
+}
+
+void fpsimd_flush_thread(void)
+{
+	memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+	fpsimd_load_state(&current->thread.fpsimd_state);
+}
+
+/*
+ * FP/SIMD support code initialisation.
+ */
+static int __init fpsimd_init(void)
+{
+	u64 pfr = read_cpuid(ID_AA64PFR0_EL1);
+
+	if (pfr & (0xf << 16)) {
+		pr_notice("Floating-point is not implemented\n");
+		return 0;
+	}
+	elf_hwcap |= HWCAP_FP;
+
+	if (pfr & (0xf << 20))
+		pr_notice("Advanced SIMD is not implemented\n");
+	else
+		elf_hwcap |= HWCAP_ASIMD;
+
+	return 0;
+}
+late_initcall(fpsimd_init);
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
new file mode 100644
index 000000000000..a2f02b63eae9
--- /dev/null
+++ b/arch/arm64/kernel/head.S
@@ -0,0 +1,510 @@
+/*
+ * Low-level CPU initialisation
+ * Based on arch/arm/kernel/head.S
+ *
+ * Copyright (C) 1994-2002 Russell King
+ * Copyright (C) 2003-2012 ARM Ltd.
+ * Authors:	Catalin Marinas <catalin.marinas@arm.com>
+ *		Will Deacon <will.deacon@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+#include <asm/assembler.h>
+#include <asm/ptrace.h>
+#include <asm/asm-offsets.h>
+#include <asm/memory.h>
+#include <asm/thread_info.h>
+#include <asm/pgtable-hwdef.h>
+#include <asm/pgtable.h>
+#include <asm/page.h>
+
+/*
+ * swapper_pg_dir is the virtual address of the initial page table. We place
+ * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has
+ * 2 pages and is placed below swapper_pg_dir.
+ */
+#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
+
+#if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000
+#error KERNEL_RAM_VADDR must start at 0xXXX80000
+#endif
+
+#define SWAPPER_DIR_SIZE	(3 * PAGE_SIZE)
+#define IDMAP_DIR_SIZE		(2 * PAGE_SIZE)
+
+	.globl	swapper_pg_dir
+	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE
+
+	.globl	idmap_pg_dir
+	.equ	idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE
+
+	.macro	pgtbl, ttb0, ttb1, phys
+	add	\ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE
+	sub	\ttb0, \ttb1, #IDMAP_DIR_SIZE
+	.endm
+
+#ifdef CONFIG_ARM64_64K_PAGES
+#define BLOCK_SHIFT	PAGE_SHIFT
+#define BLOCK_SIZE	PAGE_SIZE
+#else
+#define BLOCK_SHIFT	SECTION_SHIFT
+#define BLOCK_SIZE	SECTION_SIZE
+#endif
+
+#define KERNEL_START	KERNEL_RAM_VADDR
+#define KERNEL_END	_end
+
+/*
+ * Initial memory map attributes.
+ */
+#ifndef CONFIG_SMP
+#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF
+#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF
+#else
+#define PTE_FLAGS	PTE_TYPE_PAGE | PTE_AF | PTE_SHARED
+#define PMD_FLAGS	PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S
+#endif
+
+#ifdef CONFIG_ARM64_64K_PAGES
+#define MM_MMUFLAGS	PTE_ATTRINDX(MT_NORMAL) | PTE_FLAGS
+#define IO_MMUFLAGS	PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_XN | PTE_FLAGS
+#else
+#define MM_MMUFLAGS	PMD_ATTRINDX(MT_NORMAL) | PMD_FLAGS
+#define IO_MMUFLAGS	PMD_ATTRINDX(MT_DEVICE_nGnRE) | PMD_SECT_XN | PMD_FLAGS
+#endif
+
+/*
+ * Kernel startup entry point.
+ * ---------------------------
+ *
+ * The requirements are:
+ *   MMU = off, D-cache = off, I-cache = on or off,
+ *   x0 = physical address to the FDT blob.
+ *
+ * This code is mostly position independent so you call this at
+ * __pa(PAGE_OFFSET + TEXT_OFFSET).
+ *
+ * Note that the callee-saved registers are used for storing variables
+ * that are useful before the MMU is enabled. The allocations are described
+ * in the entry routines.
+ */
+	__HEAD
+
+	/*
+	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
+	 */
+	b	stext				// branch to kernel start, magic
+	.long	0				// reserved
+	.quad	TEXT_OFFSET			// Image load offset from start of RAM
+	.quad	0				// reserved
+	.quad	0				// reserved
+
+ENTRY(stext)
+	mov	x21, x0				// x21=FDT
+	bl	el2_setup			// Drop to EL1
+	mrs	x22, midr_el1			// x22=cpuid
+	mov	x0, x22
+	bl	lookup_processor_type
+	mov	x23, x0				// x23=current cpu_table
+	cbz	x23, __error_p			// invalid processor (x23=0)?
+	bl	__calc_phys_offset		// x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET
+	bl	__vet_fdt
+	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
+	/*
+	 * The following calls CPU specific code in a position independent
+	 * manner. See arch/arm64/mm/proc.S for details. x23 = base of
+	 * cpu_info structure selected by lookup_processor_type above.
+	 * On return, the CPU will be ready for the MMU to be turned on and
+	 * the TCR will have been set.
+	 */
+	ldr	x27, __switch_data		// address to jump to after
+						// MMU has been enabled
+	adr	lr, __enable_mmu		// return (PIC) address
+	ldr	x12, [x23, #CPU_INFO_SETUP]
+	add	x12, x12, x28			// __virt_to_phys
+	br	x12				// initialise processor
+ENDPROC(stext)
+
+/*
+ * If we're fortunate enough to boot at EL2, ensure that the world is
+ * sane before dropping to EL1.
+ */
+ENTRY(el2_setup)
+	mrs	x0, CurrentEL
+	cmp	x0, #PSR_MODE_EL2t
+	ccmp	x0, #PSR_MODE_EL2h, #0x4, ne
+	b.eq	1f
+	ret
+
+	/* Hyp configuration. */
+1:	mov	x0, #(1 << 31)			// 64-bit EL1
+	msr	hcr_el2, x0
+
+	/* Generic timers. */
+	mrs	x0, cnthctl_el2
+	orr	x0, x0, #3			// Enable EL1 physical timers
+	msr	cnthctl_el2, x0
+
+	/* Populate ID registers. */
+	mrs	x0, midr_el1
+	mrs	x1, mpidr_el1
+	msr	vpidr_el2, x0
+	msr	vmpidr_el2, x1
+
+	/* sctlr_el1 */
+	mov	x0, #0x0800			// Set/clear RES{1,0} bits
+	movk	x0, #0x30d0, lsl #16
+	msr	sctlr_el1, x0
+
+	/* Coprocessor traps. */
+	mov	x0, #0x33ff
+	msr	cptr_el2, x0			// Disable copro. traps to EL2
+
+#ifdef CONFIG_COMPAT
+	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
+#endif
+
+	/* spsr */
+	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
+		      PSR_MODE_EL1h)
+	msr	spsr_el2, x0
+	msr	elr_el2, lr
+	eret
+ENDPROC(el2_setup)
+
+	.align	3
+2:	.quad	.
+	.quad	PAGE_OFFSET
+
+#ifdef CONFIG_SMP
+	.pushsection	.smp.pen.text, "ax"
+	.align	3
+1:	.quad	.
+	.quad	secondary_holding_pen_release
+
+	/*
+	 * This provides a "holding pen" for platforms to hold all secondary
+	 * cores are held until we're ready for them to initialise.
+	 */
+ENTRY(secondary_holding_pen)
+	bl	el2_setup			// Drop to EL1
+	mrs	x0, mpidr_el1
+	and	x0, x0, #15			// CPU number
+	adr	x1, 1b
+	ldp	x2, x3, [x1]
+	sub	x1, x1, x2
+	add	x3, x3, x1
+pen:	ldr	x4, [x3]
+	cmp	x4, x0
+	b.eq	secondary_startup
+	wfe
+	b	pen
+ENDPROC(secondary_holding_pen)
+	.popsection
+
+ENTRY(secondary_startup)
+	/*
+	 * Common entry point for secondary CPUs.
+	 */
+	mrs	x22, midr_el1			// x22=cpuid
+	mov	x0, x22
+	bl	lookup_processor_type
+	mov	x23, x0				// x23=current cpu_table
+	cbz	x23, __error_p			// invalid processor (x23=0)?
+
+	bl	__calc_phys_offset		// x24=phys offset
+	pgtbl	x25, x26, x24			// x25=TTBR0, x26=TTBR1
+	ldr	x12, [x23, #CPU_INFO_SETUP]
+	add	x12, x12, x28			// __virt_to_phys
+	blr	x12				// initialise processor
+
+	ldr	x21, =secondary_data
+	ldr	x27, =__secondary_switched	// address to jump to after enabling the MMU
+	b	__enable_mmu
+ENDPROC(secondary_startup)
+
+ENTRY(__secondary_switched)
+	ldr	x0, [x21]			// get secondary_data.stack
+	mov	sp, x0
+	mov	x29, #0
+	b	secondary_start_kernel
+ENDPROC(__secondary_switched)
+#endif	/* CONFIG_SMP */
+
+/*
+ * Setup common bits before finally enabling the MMU. Essentially this is just
+ * loading the page table pointer and vector base registers.
+ *
+ * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on
+ * the MMU.
+ */
+__enable_mmu:
+	ldr	x5, =vectors
+	msr	vbar_el1, x5
+	msr	ttbr0_el1, x25			// load TTBR0
+	msr	ttbr1_el1, x26			// load TTBR1
+	isb
+	b	__turn_mmu_on
+ENDPROC(__enable_mmu)
+
+/*
+ * Enable the MMU. This completely changes the structure of the visible memory
+ * space. You will not be able to trace execution through this.
+ *
+ *  x0  = system control register
+ *  x27 = *virtual* address to jump to upon completion
+ *
+ * other registers depend on the function called upon completion
+ */
+	.align	6
+__turn_mmu_on:
+	msr	sctlr_el1, x0
+	isb
+	br	x27
+ENDPROC(__turn_mmu_on)
+
+/*
+ * Calculate the start of physical memory.
+ */
+__calc_phys_offset:
+	adr	x0, 1f
+	ldp	x1, x2, [x0]
+	sub	x28, x0, x1			// x28 = PHYS_OFFSET - PAGE_OFFSET
+	add	x24, x2, x28			// x24 = PHYS_OFFSET
+	ret
+ENDPROC(__calc_phys_offset)
+
+	.align	3
+1:	.quad	.
+	.quad	PAGE_OFFSET
+
+/*
+ * Macro to populate the PGD for the corresponding block entry in the next
+ * level (tbl) for the given virtual address.
+ *
+ * Preserves:	pgd, tbl, virt
+ * Corrupts:	tmp1, tmp2
+ */
+	.macro	create_pgd_entry, pgd, tbl, virt, tmp1, tmp2
+	lsr	\tmp1, \virt, #PGDIR_SHIFT
+	and	\tmp1, \tmp1, #PTRS_PER_PGD - 1	// PGD index
+	orr	\tmp2, \tbl, #3			// PGD entry table type
+	str	\tmp2, [\pgd, \tmp1, lsl #3]
+	.endm
+
+/*
+ * Macro to populate block entries in the page table for the start..end
+ * virtual range (inclusive).
+ *
+ * Preserves:	tbl, flags
+ * Corrupts:	phys, start, end, pstate
+ */
+	.macro	create_block_map, tbl, flags, phys, start, end, idmap=0
+	lsr	\phys, \phys, #BLOCK_SHIFT
+	.if	\idmap
+	and	\start, \phys, #PTRS_PER_PTE - 1	// table index
+	.else
+	lsr	\start, \start, #BLOCK_SHIFT
+	and	\start, \start, #PTRS_PER_PTE - 1	// table index
+	.endif
+	orr	\phys, \flags, \phys, lsl #BLOCK_SHIFT	// table entry
+	.ifnc	\start,\end
+	lsr	\end, \end, #BLOCK_SHIFT
+	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
+	.endif
+9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
+	.ifnc	\start,\end
+	add	\start, \start, #1			// next entry
+	add	\phys, \phys, #BLOCK_SIZE		// next block
+	cmp	\start, \end
+	b.ls	9999b
+	.endif
+	.endm
+
+/*
+ * Setup the initial page tables. We only setup the barest amount which is
+ * required to get the kernel running. The following sections are required:
+ *   - identity mapping to enable the MMU (low address, TTBR0)
+ *   - first few MB of the kernel linear mapping to jump to once the MMU has
+ *     been enabled, including the FDT blob (TTBR1)
+ */
+__create_page_tables:
+	pgtbl	x25, x26, x24			// idmap_pg_dir and swapper_pg_dir addresses
+
+	/*
+	 * Clear the idmap and swapper page tables.
+	 */
+	mov	x0, x25
+	add	x6, x26, #SWAPPER_DIR_SIZE
+1:	stp	xzr, xzr, [x0], #16
+	stp	xzr, xzr, [x0], #16
+	stp	xzr, xzr, [x0], #16
+	stp	xzr, xzr, [x0], #16
+	cmp	x0, x6
+	b.lo	1b
+
+	ldr	x7, =MM_MMUFLAGS
+
+	/*
+	 * Create the identity mapping.
+	 */
+	add	x0, x25, #PAGE_SIZE		// section table address
+	adr	x3, __turn_mmu_on		// virtual/physical address
+	create_pgd_entry x25, x0, x3, x5, x6
+	create_block_map x0, x7, x3, x5, x5, idmap=1
+
+	/*
+	 * Map the kernel image (starting with PHYS_OFFSET).
+	 */
+	add	x0, x26, #PAGE_SIZE		// section table address
+	mov	x5, #PAGE_OFFSET
+	create_pgd_entry x26, x0, x5, x3, x6
+	ldr	x6, =KERNEL_END - 1
+	mov	x3, x24				// phys offset
+	create_block_map x0, x7, x3, x5, x6
+
+	/*
+	 * Map the FDT blob (maximum 2MB; must be within 512MB of
+	 * PHYS_OFFSET).
+	 */
+	mov	x3, x21				// FDT phys address
+	and	x3, x3, #~((1 << 21) - 1)	// 2MB aligned
+	mov	x6, #PAGE_OFFSET
+	sub	x5, x3, x24			// subtract PHYS_OFFSET
+	tst	x5, #~((1 << 29) - 1)		// within 512MB?
+	csel	x21, xzr, x21, ne		// zero the FDT pointer
+	b.ne	1f
+	add	x5, x5, x6			// __va(FDT blob)
+	add	x6, x5, #1 << 21		// 2MB for the FDT blob
+	sub	x6, x6, #1			// inclusive range
+	create_block_map x0, x7, x3, x5, x6
+1:
+	ret
+ENDPROC(__create_page_tables)
+	.ltorg
+
+	.align	3
+	.type	__switch_data, %object
+__switch_data:
+	.quad	__mmap_switched
+	.quad	__data_loc			// x4
+	.quad	_data				// x5
+	.quad	__bss_start			// x6
+	.quad	_end				// x7
+	.quad	processor_id			// x4
+	.quad	__fdt_pointer			// x5
+	.quad	memstart_addr			// x6
+	.quad	init_thread_union + THREAD_START_SP // sp
+
+/*
+ * The following fragment of code is executed with the MMU on in MMU mode, and
+ * uses absolute addresses; this is not position independent.
+ */
+__mmap_switched:
+	adr	x3, __switch_data + 8
+
+	ldp	x4, x5, [x3], #16
+	ldp	x6, x7, [x3], #16
+	cmp	x4, x5				// Copy data segment if needed
+1:	ccmp	x5, x6, #4, ne
+	b.eq	2f
+	ldr	x16, [x4], #8
+	str	x16, [x5], #8
+	b	1b
+2:
+1:	cmp	x6, x7
+	b.hs	2f
+	str	xzr, [x6], #8			// Clear BSS
+	b	1b
+2:
+	ldp	x4, x5, [x3], #16
+	ldr	x6, [x3], #8
+	ldr	x16, [x3]
+	mov	sp, x16
+	str	x22, [x4]			// Save processor ID
+	str	x21, [x5]			// Save FDT pointer
+	str	x24, [x6]			// Save PHYS_OFFSET
+	mov	x29, #0
+	b	start_kernel
+ENDPROC(__mmap_switched)
+
+/*
+ * Exception handling. Something went wrong and we can't proceed. We ought to
+ * tell the user, but since we don't have any guarantee that we're even
+ * running on the right architecture, we do virtually nothing.
+ */
+__error_p:
+ENDPROC(__error_p)
+
+__error:
+1:	nop
+	b	1b
+ENDPROC(__error)
+
+/*
+ * This function gets the processor ID in w0 and searches the cpu_table[] for
+ * a match. It returns a pointer to the struct cpu_info it found. The
+ * cpu_table[] must end with an empty (all zeros) structure.
+ *
+ * This routine can be called via C code and it needs to work with the MMU
+ * both disabled and enabled (the offset is calculated automatically).
+ */
+ENTRY(lookup_processor_type)
+	adr	x1, __lookup_processor_type_data
+	ldp	x2, x3, [x1]
+	sub	x1, x1, x2			// get offset between VA and PA
+	add	x3, x3, x1			// convert VA to PA
+1:
+	ldp	w5, w6, [x3]			// load cpu_id_val and cpu_id_mask
+	cbz	w5, 2f				// end of list?
+	and	w6, w6, w0
+	cmp	w5, w6
+	b.eq	3f
+	add	x3, x3, #CPU_INFO_SZ
+	b	1b
+2:
+	mov	x3, #0				// unknown processor
+3:
+	mov	x0, x3
+	ret
+ENDPROC(lookup_processor_type)
+
+	.align	3
+	.type	__lookup_processor_type_data, %object
+__lookup_processor_type_data:
+	.quad	.
+	.quad	cpu_table
+	.size	__lookup_processor_type_data, . - __lookup_processor_type_data
+
+/*
+ * Determine validity of the x21 FDT pointer.
+ * The dtb must be 8-byte aligned and live in the first 512M of memory.
+ */
+__vet_fdt:
+	tst	x21, #0x7
+	b.ne	1f
+	cmp	x21, x24
+	b.lt	1f
+	mov	x0, #(1 << 29)
+	add	x0, x0, x24
+	cmp	x21, x0
+	b.ge	1f
+	ret
+1:
+	mov	x21, #0
+	ret
+ENDPROC(__vet_fdt)
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
new file mode 100644
index 000000000000..5ab825c59db9
--- /dev/null
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -0,0 +1,880 @@
+/*
+ * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
+ * using the CPU's debug registers.
+ *
+ * Copyright (C) 2012 ARM Limited
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "hw-breakpoint: " fmt
+
+#include <linux/errno.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+
+#include <asm/compat.h>
+#include <asm/current.h>
+#include <asm/debug-monitors.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/kdebug.h>
+#include <asm/traps.h>
+#include <asm/cputype.h>
+#include <asm/system_misc.h>
+
+/* Breakpoint currently in use for each BRP. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
+
+/* Watchpoint currently in use for each WRP. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
+
+/* Currently stepping a per-CPU kernel breakpoint. */
+static DEFINE_PER_CPU(int, stepping_kernel_bp);
+
+/* Number of BRP/WRP registers on this CPU. */
+static int core_num_brps;
+static int core_num_wrps;
+
+/* Determine number of BRP registers available. */
+static int get_num_brps(void)
+{
+	return ((read_cpuid(ID_AA64DFR0_EL1) >> 12) & 0xf) + 1;
+}
+
+/* Determine number of WRP registers available. */
+static int get_num_wrps(void)
+{
+	return ((read_cpuid(ID_AA64DFR0_EL1) >> 20) & 0xf) + 1;
+}
+
+int hw_breakpoint_slots(int type)
+{
+	/*
+	 * We can be called early, so don't rely on
+	 * our static variables being initialised.
+	 */
+	switch (type) {
+	case TYPE_INST:
+		return get_num_brps();
+	case TYPE_DATA:
+		return get_num_wrps();
+	default:
+		pr_warning("unknown slot type: %d\n", type);
+		return 0;
+	}
+}
+
+#define READ_WB_REG_CASE(OFF, N, REG, VAL)	\
+	case (OFF + N):				\
+		AARCH64_DBG_READ(N, REG, VAL);	\
+		break
+
+#define WRITE_WB_REG_CASE(OFF, N, REG, VAL)	\
+	case (OFF + N):				\
+		AARCH64_DBG_WRITE(N, REG, VAL);	\
+		break
+
+#define GEN_READ_WB_REG_CASES(OFF, REG, VAL)	\
+	READ_WB_REG_CASE(OFF,  0, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  1, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  2, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  3, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  4, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  5, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  6, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  7, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  8, REG, VAL);	\
+	READ_WB_REG_CASE(OFF,  9, REG, VAL);	\
+	READ_WB_REG_CASE(OFF, 10, REG, VAL);	\
+	READ_WB_REG_CASE(OFF, 11, REG, VAL);	\
+	READ_WB_REG_CASE(OFF, 12, REG, VAL);	\
+	READ_WB_REG_CASE(OFF, 13, REG, VAL);	\
+	READ_WB_REG_CASE(OFF, 14, REG, VAL);	\
+	READ_WB_REG_CASE(OFF, 15, REG, VAL)
+
+#define GEN_WRITE_WB_REG_CASES(OFF, REG, VAL)	\
+	WRITE_WB_REG_CASE(OFF,  0, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  1, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  2, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  3, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  4, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  5, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  6, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  7, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  8, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF,  9, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF, 10, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF, 11, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF, 12, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF, 13, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF, 14, REG, VAL);	\
+	WRITE_WB_REG_CASE(OFF, 15, REG, VAL)
+
+static u64 read_wb_reg(int reg, int n)
+{
+	u64 val = 0;
+
+	switch (reg + n) {
+	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
+	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
+	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
+	GEN_READ_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
+	default:
+		pr_warning("attempt to read from unknown breakpoint register %d\n", n);
+	}
+
+	return val;
+}
+
+static void write_wb_reg(int reg, int n, u64 val)
+{
+	switch (reg + n) {
+	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
+	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
+	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
+	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
+	default:
+		pr_warning("attempt to write to unknown breakpoint register %d\n", n);
+	}
+	isb();
+}
+
+/*
+ * Convert a breakpoint privilege level to the corresponding exception
+ * level.
+ */ +static enum debug_el debug_exception_level(int privilege) +{ + switch (privilege) { + case AARCH64_BREAKPOINT_EL0: + return DBG_ACTIVE_EL0; + case AARCH64_BREAKPOINT_EL1: + return DBG_ACTIVE_EL1; + default: + pr_warning("invalid breakpoint privilege level %d\n", privilege); + return -EINVAL; + } +} + +/* + * Install a perf counter breakpoint. + */ +int arch_install_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + struct perf_event **slot, **slots; + struct debug_info *debug_info = &current->thread.debug; + int i, max_slots, ctrl_reg, val_reg, reg_enable; + u32 ctrl; + + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + /* Breakpoint */ + ctrl_reg = AARCH64_DBG_REG_BCR; + val_reg = AARCH64_DBG_REG_BVR; + slots = __get_cpu_var(bp_on_reg); + max_slots = core_num_brps; + reg_enable = !debug_info->bps_disabled; + } else { + /* Watchpoint */ + ctrl_reg = AARCH64_DBG_REG_WCR; + val_reg = AARCH64_DBG_REG_WVR; + slots = __get_cpu_var(wp_on_reg); + max_slots = core_num_wrps; + reg_enable = !debug_info->wps_disabled; + } + + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + + if (!*slot) { + *slot = bp; + break; + } + } + + if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) + return -ENOSPC; + + /* Ensure debug monitors are enabled at the correct exception level. */ + enable_debug_monitors(debug_exception_level(info->ctrl.privilege)); + + /* Setup the address register. */ + write_wb_reg(val_reg, i, info->address); + + /* Setup the control register. */ + ctrl = encode_ctrl_reg(info->ctrl); + write_wb_reg(ctrl_reg, i, reg_enable ? ctrl | 0x1 : ctrl & ~0x1); + + return 0; +} + +void arch_uninstall_hw_breakpoint(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + struct perf_event **slot, **slots; + int i, max_slots, base; + + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + /* Breakpoint */ + base = AARCH64_DBG_REG_BCR; + slots = __get_cpu_var(bp_on_reg); + max_slots = core_num_brps; + } else { + /* Watchpoint */ + base = AARCH64_DBG_REG_WCR; + slots = __get_cpu_var(wp_on_reg); + max_slots = core_num_wrps; + } + + /* Remove the breakpoint. */ + for (i = 0; i < max_slots; ++i) { + slot = &slots[i]; + + if (*slot == bp) { + *slot = NULL; + break; + } + } + + if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) + return; + + /* Reset the control register. */ + write_wb_reg(base, i, 0); + + /* Release the debug monitors for the correct exception level. */ + disable_debug_monitors(debug_exception_level(info->ctrl.privilege)); +} + +static int get_hbp_len(u8 hbp_len) +{ + unsigned int len_in_bytes = 0; + + switch (hbp_len) { + case ARM_BREAKPOINT_LEN_1: + len_in_bytes = 1; + break; + case ARM_BREAKPOINT_LEN_2: + len_in_bytes = 2; + break; + case ARM_BREAKPOINT_LEN_4: + len_in_bytes = 4; + break; + case ARM_BREAKPOINT_LEN_8: + len_in_bytes = 8; + break; + } + + return len_in_bytes; +} + +/* + * Check whether bp virtual address is in kernel space. + */ +int arch_check_bp_in_kernelspace(struct perf_event *bp) +{ + unsigned int len; + unsigned long va; + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + + va = info->address; + len = get_hbp_len(info->ctrl.len); + + return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); +} + +/* + * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl. + * Hopefully this will disappear when ptrace can bypass the conversion + * to generic breakpoint descriptions.
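+ * For example, a watchpoint programmed with type ARM_BREAKPOINT_STORE + * and length ARM_BREAKPOINT_LEN_4 comes back as the generic pair + * (HW_BREAKPOINT_W, HW_BREAKPOINT_LEN_4).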
+ */ +int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl, + int *gen_len, int *gen_type) +{ + /* Type */ + switch (ctrl.type) { + case ARM_BREAKPOINT_EXECUTE: + *gen_type = HW_BREAKPOINT_X; + break; + case ARM_BREAKPOINT_LOAD: + *gen_type = HW_BREAKPOINT_R; + break; + case ARM_BREAKPOINT_STORE: + *gen_type = HW_BREAKPOINT_W; + break; + case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE: + *gen_type = HW_BREAKPOINT_RW; + break; + default: + return -EINVAL; + } + + /* Len */ + switch (ctrl.len) { + case ARM_BREAKPOINT_LEN_1: + *gen_len = HW_BREAKPOINT_LEN_1; + break; + case ARM_BREAKPOINT_LEN_2: + *gen_len = HW_BREAKPOINT_LEN_2; + break; + case ARM_BREAKPOINT_LEN_4: + *gen_len = HW_BREAKPOINT_LEN_4; + break; + case ARM_BREAKPOINT_LEN_8: + *gen_len = HW_BREAKPOINT_LEN_8; + break; + default: + return -EINVAL; + } + + return 0; +} + +/* + * Construct an arch_hw_breakpoint from a perf_event. + */ +static int arch_build_bp_info(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + + /* Type */ + switch (bp->attr.bp_type) { + case HW_BREAKPOINT_X: + info->ctrl.type = ARM_BREAKPOINT_EXECUTE; + break; + case HW_BREAKPOINT_R: + info->ctrl.type = ARM_BREAKPOINT_LOAD; + break; + case HW_BREAKPOINT_W: + info->ctrl.type = ARM_BREAKPOINT_STORE; + break; + case HW_BREAKPOINT_RW: + info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE; + break; + default: + return -EINVAL; + } + + /* Len */ + switch (bp->attr.bp_len) { + case HW_BREAKPOINT_LEN_1: + info->ctrl.len = ARM_BREAKPOINT_LEN_1; + break; + case HW_BREAKPOINT_LEN_2: + info->ctrl.len = ARM_BREAKPOINT_LEN_2; + break; + case HW_BREAKPOINT_LEN_4: + info->ctrl.len = ARM_BREAKPOINT_LEN_4; + break; + case HW_BREAKPOINT_LEN_8: + info->ctrl.len = ARM_BREAKPOINT_LEN_8; + break; + default: + return -EINVAL; + } + + /* + * On AArch64, we only permit breakpoints of length 4, whereas + * AArch32 also requires breakpoints of length 2 for Thumb. + * Watchpoints can be of length 1, 2, 4 or 8 bytes. + */ + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { + if (is_compat_task()) { + if (info->ctrl.len != ARM_BREAKPOINT_LEN_2 && + info->ctrl.len != ARM_BREAKPOINT_LEN_4) + return -EINVAL; + } else if (info->ctrl.len != ARM_BREAKPOINT_LEN_4) { + /* + * FIXME: Some tools (I'm looking at you perf) assume + * that breakpoints should be sizeof(long). This + * is nonsense. For now, we fix up the parameter + * but we should probably return -EINVAL instead. + */ + info->ctrl.len = ARM_BREAKPOINT_LEN_4; + } + } + + /* Address */ + info->address = bp->attr.bp_addr; + + /* + * Privilege + * Note that we disallow combined EL0/EL1 breakpoints because + * that would complicate the stepping code. + */ + if (arch_check_bp_in_kernelspace(bp)) + info->ctrl.privilege = AARCH64_BREAKPOINT_EL1; + else + info->ctrl.privilege = AARCH64_BREAKPOINT_EL0; + + /* Enabled? */ + info->ctrl.enabled = !bp->attr.disabled; + + return 0; +} + +/* + * Validate the arch-specific HW Breakpoint register settings. + */ +int arch_validate_hwbkpt_settings(struct perf_event *bp) +{ + struct arch_hw_breakpoint *info = counter_arch_bp(bp); + int ret; + u64 alignment_mask, offset; + + /* Build the arch_hw_breakpoint. */ + ret = arch_build_bp_info(bp); + if (ret) + return ret; + + /* + * Check address alignment. + * We don't do any clever alignment correction for watchpoints + * because using 64-bit unaligned addresses is deprecated for + * AArch64. + * + * AArch32 tasks expect some simple alignment fixups, so emulate + * that here. 
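+ * As an illustration of the fixup below: a 2-byte compat watchpoint at + * address 0x1002 has offset 2, so the address is rounded down to 0x1000 + * and the byte-select mask in ctrl.len is shifted left by 2 to cover + * bytes 2-3 of the aligned word.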
+ */ + if (is_compat_task()) { + if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + alignment_mask = 0x7; + else + alignment_mask = 0x3; + offset = info->address & alignment_mask; + switch (offset) { + case 0: + /* Aligned */ + break; + case 1: + /* Allow single byte watchpoint. */ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_1) + break; + case 2: + /* Allow halfword watchpoints and breakpoints. */ + if (info->ctrl.len == ARM_BREAKPOINT_LEN_2) + break; + default: + return -EINVAL; + } + + info->address &= ~alignment_mask; + info->ctrl.len <<= offset; + } else { + if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) + alignment_mask = 0x3; + else + alignment_mask = 0x7; + if (info->address & alignment_mask) + return -EINVAL; + } + + /* + * Disallow per-task kernel breakpoints since these would + * complicate the stepping code. + */ + if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target) + return -EINVAL; + + return 0; +} + +/* + * Enable/disable all of the breakpoints active at the specified + * exception level at the register level. + * This is used when single-stepping after a breakpoint exception. + */ +static void toggle_bp_registers(int reg, enum debug_el el, int enable) +{ + int i, max_slots, privilege; + u32 ctrl; + struct perf_event **slots; + + switch (reg) { + case AARCH64_DBG_REG_BCR: + slots = __get_cpu_var(bp_on_reg); + max_slots = core_num_brps; + break; + case AARCH64_DBG_REG_WCR: + slots = __get_cpu_var(wp_on_reg); + max_slots = core_num_wrps; + break; + default: + return; + } + + for (i = 0; i < max_slots; ++i) { + if (!slots[i]) + continue; + + privilege = counter_arch_bp(slots[i])->ctrl.privilege; + if (debug_exception_level(privilege) != el) + continue; + + ctrl = read_wb_reg(reg, i); + if (enable) + ctrl |= 0x1; + else + ctrl &= ~0x1; + write_wb_reg(reg, i, ctrl); + } +} + +/* + * Debug exception handlers. + */ +static int breakpoint_handler(unsigned long unused, unsigned int esr, + struct pt_regs *regs) +{ + int i, step = 0, *kernel_step; + u32 ctrl_reg; + u64 addr, val; + struct perf_event *bp, **slots; + struct debug_info *debug_info; + struct arch_hw_breakpoint_ctrl ctrl; + + slots = (struct perf_event **)__get_cpu_var(bp_on_reg); + addr = instruction_pointer(regs); + debug_info = &current->thread.debug; + + for (i = 0; i < core_num_brps; ++i) { + rcu_read_lock(); + + bp = slots[i]; + + if (bp == NULL) + goto unlock; + + /* Check if the breakpoint value matches. */ + val = read_wb_reg(AARCH64_DBG_REG_BVR, i); + if (val != (addr & ~0x3)) + goto unlock; + + /* Possible match, check the byte address select to confirm. */ + ctrl_reg = read_wb_reg(AARCH64_DBG_REG_BCR, i); + decode_ctrl_reg(ctrl_reg, &ctrl); + if (!((1 << (addr & 0x3)) & ctrl.len)) + goto unlock; + + counter_arch_bp(bp)->trigger = addr; + perf_bp_event(bp, regs); + + /* Do we need to handle the stepping? */ + if (!bp->overflow_handler) + step = 1; +unlock: + rcu_read_unlock(); + } + + if (!step) + return 0; + + if (user_mode(regs)) { + debug_info->bps_disabled = 1; + toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 0); + + /* If we're already stepping a watchpoint, just return.
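+ * The pending single-step will re-arm both breakpoints and watchpoints + * in reinstall_suspended_bps(), so there is no need to request a + * second step.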
*/ + if (debug_info->wps_disabled) + return 0; + + if (test_thread_flag(TIF_SINGLESTEP)) + debug_info->suspended_step = 1; + else + user_enable_single_step(current); + } else { + toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0); + kernel_step = &__get_cpu_var(stepping_kernel_bp); + + if (*kernel_step != ARM_KERNEL_STEP_NONE) + return 0; + + if (kernel_active_single_step()) { + *kernel_step = ARM_KERNEL_STEP_SUSPEND; + } else { + *kernel_step = ARM_KERNEL_STEP_ACTIVE; + kernel_enable_single_step(regs); + } + } + + return 0; +} + +static int watchpoint_handler(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + int i, step = 0, *kernel_step, access; + u32 ctrl_reg; + u64 val, alignment_mask; + struct perf_event *wp, **slots; + struct debug_info *debug_info; + struct arch_hw_breakpoint *info; + struct arch_hw_breakpoint_ctrl ctrl; + + slots = (struct perf_event **)__get_cpu_var(wp_on_reg); + debug_info = &current->thread.debug; + + for (i = 0; i < core_num_wrps; ++i) { + rcu_read_lock(); + + wp = slots[i]; + + if (wp == NULL) + goto unlock; + + info = counter_arch_bp(wp); + /* AArch32 watchpoints are either 4 or 8 bytes aligned. */ + if (is_compat_task()) { + if (info->ctrl.len == ARM_BREAKPOINT_LEN_8) + alignment_mask = 0x7; + else + alignment_mask = 0x3; + } else { + alignment_mask = 0x7; + } + + /* Check if the watchpoint value matches. */ + val = read_wb_reg(AARCH64_DBG_REG_WVR, i); + if (val != (addr & ~alignment_mask)) + goto unlock; + + /* Possible match, check the byte address select to confirm. */ + ctrl_reg = read_wb_reg(AARCH64_DBG_REG_WCR, i); + decode_ctrl_reg(ctrl_reg, &ctrl); + if (!((1 << (addr & alignment_mask)) & ctrl.len)) + goto unlock; + + /* + * Check that the access type matches. + * 0 => load, otherwise => store + */ + access = (esr & AARCH64_ESR_ACCESS_MASK) ? HW_BREAKPOINT_W : + HW_BREAKPOINT_R; + if (!(access & hw_breakpoint_type(wp))) + goto unlock; + + info->trigger = addr; + perf_bp_event(wp, regs); + + /* Do we need to handle the stepping? */ + if (!wp->overflow_handler) + step = 1; + +unlock: + rcu_read_unlock(); + } + + if (!step) + return 0; + + /* + * We always disable EL0 watchpoints because the kernel can + * cause these to fire via an unprivileged access. + */ + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 0); + + if (user_mode(regs)) { + debug_info->wps_disabled = 1; + + /* If we're already stepping a breakpoint, just return. */ + if (debug_info->bps_disabled) + return 0; + + if (test_thread_flag(TIF_SINGLESTEP)) + debug_info->suspended_step = 1; + else + user_enable_single_step(current); + } else { + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0); + kernel_step = &__get_cpu_var(stepping_kernel_bp); + + if (*kernel_step != ARM_KERNEL_STEP_NONE) + return 0; + + if (kernel_active_single_step()) { + *kernel_step = ARM_KERNEL_STEP_SUSPEND; + } else { + *kernel_step = ARM_KERNEL_STEP_ACTIVE; + kernel_enable_single_step(regs); + } + } + + return 0; +} + +/* + * Handle single-step exception. + */ +int reinstall_suspended_bps(struct pt_regs *regs) +{ + struct debug_info *debug_info = &current->thread.debug; + int handled_exception = 0, *kernel_step; + + kernel_step = &__get_cpu_var(stepping_kernel_bp); + + /* + * Called from single-step exception handler. + * Return 0 if execution can resume, 1 if a SIGTRAP should be + * reported.
+ */ + if (user_mode(regs)) { + if (debug_info->bps_disabled) { + debug_info->bps_disabled = 0; + toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL0, 1); + handled_exception = 1; + } + + if (debug_info->wps_disabled) { + debug_info->wps_disabled = 0; + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1); + handled_exception = 1; + } + + if (handled_exception) { + if (debug_info->suspended_step) { + debug_info->suspended_step = 0; + /* Allow exception handling to fall-through. */ + handled_exception = 0; + } else { + user_disable_single_step(current); + } + } + } else if (*kernel_step != ARM_KERNEL_STEP_NONE) { + toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 1); + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 1); + + if (!debug_info->wps_disabled) + toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL0, 1); + + if (*kernel_step != ARM_KERNEL_STEP_SUSPEND) { + kernel_disable_single_step(); + handled_exception = 1; + } else { + handled_exception = 0; + } + + *kernel_step = ARM_KERNEL_STEP_NONE; + } + + return !handled_exception; +} + +/* + * Context-switcher for restoring suspended breakpoints. + */ +void hw_breakpoint_thread_switch(struct task_struct *next) +{ + /* + * current next + * disabled: 0 0 => The usual case, NOTIFY_DONE + * 0 1 => Disable the registers + * 1 0 => Enable the registers + * 1 1 => NOTIFY_DONE. per-task bps will + * get taken care of by perf. + */ + + struct debug_info *current_debug_info, *next_debug_info; + + current_debug_info = &current->thread.debug; + next_debug_info = &next->thread.debug; + + /* Update breakpoints. */ + if (current_debug_info->bps_disabled != next_debug_info->bps_disabled) + toggle_bp_registers(AARCH64_DBG_REG_BCR, + DBG_ACTIVE_EL0, + !next_debug_info->bps_disabled); + + /* Update watchpoints. */ + if (current_debug_info->wps_disabled != next_debug_info->wps_disabled) + toggle_bp_registers(AARCH64_DBG_REG_WCR, + DBG_ACTIVE_EL0, + !next_debug_info->wps_disabled); +} + +/* + * CPU initialisation. + */ +static void reset_ctrl_regs(void *unused) +{ + int i; + + for (i = 0; i < core_num_brps; ++i) { + write_wb_reg(AARCH64_DBG_REG_BCR, i, 0UL); + write_wb_reg(AARCH64_DBG_REG_BVR, i, 0UL); + } + + for (i = 0; i < core_num_wrps; ++i) { + write_wb_reg(AARCH64_DBG_REG_WCR, i, 0UL); + write_wb_reg(AARCH64_DBG_REG_WVR, i, 0UL); + } +} + +static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self, + unsigned long action, + void *hcpu) +{ + int cpu = (long)hcpu; + if (action == CPU_ONLINE) + smp_call_function_single(cpu, reset_ctrl_regs, NULL, 1); + return NOTIFY_OK; +} + +static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = { + .notifier_call = hw_breakpoint_reset_notify, +}; + +/* + * One-time initialisation. + */ +static int __init arch_hw_breakpoint_init(void) +{ + core_num_brps = get_num_brps(); + core_num_wrps = get_num_wrps(); + + pr_info("found %d breakpoint and %d watchpoint registers.\n", + core_num_brps, core_num_wrps); + + /* + * Reset the breakpoint resources. We assume that a halting + * debugger will leave the world in a nice state for us. + */ + smp_call_function(reset_ctrl_regs, NULL, 1); + reset_ctrl_regs(NULL); + + /* Register debug fault handlers. */ + hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP, + TRAP_HWBKPT, "hw-breakpoint handler"); + hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP, + TRAP_HWBKPT, "hw-watchpoint handler"); + + /* Register hotplug notifier.
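+ * A CPU coming online has UNKNOWN debug register state, so the notifier + * runs reset_ctrl_regs() on it to zero every BVR/BCR and WVR/WCR pair + * before any breakpoints can be installed there.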
*/ + register_cpu_notifier(&hw_breakpoint_reset_nb); + + return 0; +} +arch_initcall(arch_hw_breakpoint_init); + +void hw_breakpoint_pmu_read(struct perf_event *bp) +{ +} + +/* + * Dummy function to register with die_notifier. + */ +int hw_breakpoint_exceptions_notify(struct notifier_block *unused, + unsigned long val, void *data) +{ + return NOTIFY_DONE; +} diff --git a/arch/arm64/kernel/io.c b/arch/arm64/kernel/io.c new file mode 100644 index 000000000000..7d37ead4d199 --- /dev/null +++ b/arch/arm64/kernel/io.c @@ -0,0 +1,64 @@ +/* + * Based on arch/arm/kernel/io.c + * + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/export.h> +#include <linux/types.h> +#include <linux/io.h> + +/* + * Copy data from IO memory space to "real" memory space. + */ +void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count) +{ + unsigned char *t = to; + while (count) { + count--; + *t = readb(from); + t++; + from++; + } +} +EXPORT_SYMBOL(__memcpy_fromio); + +/* + * Copy data from "real" memory space to IO memory space. + */ +void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count) +{ + const unsigned char *f = from; + while (count) { + count--; + writeb(*f, to); + f++; + to++; + } +} +EXPORT_SYMBOL(__memcpy_toio); + +/* + * "memset" on IO memory space. + */ +void __memset_io(volatile void __iomem *dst, int c, size_t count) +{ + while (count) { + count--; + writeb(c, dst); + dst++; + } +} +EXPORT_SYMBOL(__memset_io); diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c new file mode 100644 index 000000000000..0373c6609eaf --- /dev/null +++ b/arch/arm64/kernel/irq.c @@ -0,0 +1,84 @@ +/* + * Based on arch/arm/kernel/irq.c + * + * Copyright (C) 1992 Linus Torvalds + * Modifications for ARM processor Copyright (C) 1995-2000 Russell King. + * Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation. + * Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and + * Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>. + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
*/ + +#include <linux/kernel_stat.h> +#include <linux/irq.h> +#include <linux/smp.h> +#include <linux/init.h> +#include <linux/of_irq.h> +#include <linux/seq_file.h> +#include <linux/ratelimit.h> + +unsigned long irq_err_count; + +int arch_show_interrupts(struct seq_file *p, int prec) +{ +#ifdef CONFIG_SMP + show_ipi_list(p, prec); +#endif + seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count); + return 0; +} + +/* + * handle_IRQ handles all hardware IRQs. Decoded IRQs should + * not come via this function. Instead, they should provide their + * own 'handler'. Used by platform code implementing C-based 1st + * level decoding. + */ +void handle_IRQ(unsigned int irq, struct pt_regs *regs) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + irq_enter(); + + /* + * Some hardware gives randomly wrong interrupts. Rather + * than crashing, do something sensible. + */ + if (unlikely(irq >= nr_irqs)) { + pr_warn_ratelimited("Bad IRQ%u\n", irq); + ack_bad_irq(irq); + } else { + generic_handle_irq(irq); + } + + irq_exit(); + set_irq_regs(old_regs); +} + +/* + * Interrupt controllers supported by the kernel. + */ +static const struct of_device_id intctrl_of_match[] __initconst = { + /* IRQ controllers { .compatible, .data } info to go here */ + {} +}; + +void __init init_IRQ(void) +{ + of_irq_init(intctrl_of_match); + + if (!handle_arch_irq) + panic("No interrupt controller found."); +} diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S new file mode 100644 index 000000000000..8b69ecb1d8bc --- /dev/null +++ b/arch/arm64/kernel/kuser32.S @@ -0,0 +1,77 @@ +/* + * Low-level user helpers placed in the vectors page for AArch32. + * Based on the kuser helpers in arch/arm/kernel/entry-armv.S. + * + * Copyright (C) 2005-2011 Nicolas Pitre <nico@fluxnic.net> + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * + * AArch32 user helpers. + * + * Each segment is 32-byte aligned and will be moved to the top of the high + * vector page. New segments (if ever needed) must be added in front of + * existing ones. This mechanism should be used only for things that are + * really small and justified, and not be abused freely. + * + * See Documentation/arm/kernel_user_helpers.txt for formal definitions.
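+ * + * As a rough illustration of the calling convention (the document above + * is authoritative), 32-bit userspace invokes a helper through its fixed + * address, e.g. for the cmpxchg helper at 0xffff0fc0: + * + * typedef int (__kuser_cmpxchg_t)(int oldval, int newval, + * volatile int *ptr); + * #define __kuser_cmpxchg (*(__kuser_cmpxchg_t *)0xffff0fc0) + * + * which returns 0 if *ptr was atomically updated from oldval to newval, + * and non-zero otherwise.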
+ */ + .align 5 + .globl __kuser_helper_start +__kuser_helper_start: + +__kuser_cmpxchg64: // 0xffff0f60 + .inst 0xe92d00f0 // push {r4, r5, r6, r7} + .inst 0xe1c040d0 // ldrd r4, r5, [r0] + .inst 0xe1c160d0 // ldrd r6, r7, [r1] + .inst 0xf57ff05f // dmb sy + .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2] + .inst 0xe0303004 // eors r3, r0, r4 + .inst 0x00313005 // eoreqs r3, r1, r5 + .inst 0x01a23f96 // strexdeq r3, r6, [r2] + .inst 0x03330001 // teqeq r3, #1 + .inst 0x0afffff9 // beq 1b + .inst 0xf57ff05f // dmb sy + .inst 0xe2730000 // rsbs r0, r3, #0 + .inst 0xe8bd00f0 // pop {r4, r5, r6, r7} + .inst 0xe12fff1e // bx lr + + .align 5 +__kuser_memory_barrier: // 0xffff0fa0 + .inst 0xf57ff05f // dmb sy + .inst 0xe12fff1e // bx lr + + .align 5 +__kuser_cmpxchg: // 0xffff0fc0 + .inst 0xf57ff05f // dmb sy + .inst 0xe1923f9f // 1: ldrex r3, [r2] + .inst 0xe0533000 // subs r3, r3, r0 + .inst 0x01823f91 // strexeq r3, r1, [r2] + .inst 0x03330001 // teqeq r3, #1 + .inst 0x0afffffa // beq 1b + .inst 0xe2730000 // rsbs r0, r3, #0 + .inst 0xeaffffef // b <__kuser_memory_barrier> + + .align 5 +__kuser_get_tls: // 0xffff0fe0 + .inst 0xee1d0f70 // mrc p15, 0, r0, c13, c0, 3 + .inst 0xe12fff1e // bx lr + .rep 5 + .word 0 + .endr + +__kuser_helper_version: // 0xffff0ffc + .word ((__kuser_helper_end - __kuser_helper_start) >> 5) + .globl __kuser_helper_end +__kuser_helper_end: diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c new file mode 100644 index 000000000000..ca0e3d55da99 --- /dev/null +++ b/arch/arm64/kernel/module.c @@ -0,0 +1,456 @@ +/* + * AArch64 loadable module support. + * + * Copyright (C) 2012 ARM Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#include <linux/bitops.h> +#include <linux/elf.h> +#include <linux/gfp.h> +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/moduleloader.h> +#include <linux/vmalloc.h> + +void *module_alloc(unsigned long size) +{ + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL_EXEC, -1, + __builtin_return_address(0)); +} + +enum aarch64_reloc_op { + RELOC_OP_NONE, + RELOC_OP_ABS, + RELOC_OP_PREL, + RELOC_OP_PAGE, +}; + +static u64 do_reloc(enum aarch64_reloc_op reloc_op, void *place, u64 val) +{ + switch (reloc_op) { + case RELOC_OP_ABS: + return val; + case RELOC_OP_PREL: + return val - (u64)place; + case RELOC_OP_PAGE: + return (val & ~0xfff) - ((u64)place & ~0xfff); + case RELOC_OP_NONE: + return 0; + } + + pr_err("do_reloc: unknown relocation operation %d\n", reloc_op); + return 0; +} + +static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len) +{ + u64 imm_mask = (1 << len) - 1; + s64 sval = do_reloc(op, place, val); + + switch (len) { + case 16: + *(s16 *)place = sval; + break; + case 32: + *(s32 *)place = sval; + break; + case 64: + *(s64 *)place = sval; + break; + default: + pr_err("Invalid length (%d) for data relocation\n", len); + return 0; + } + + /* + * Extract the upper value bits (including the sign bit) and + * shift them to bit 0. + */ + sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); + + /* + * Overflow has occurred if the value is not representable in + * len bits (i.e the bottom len bits are not sign-extended and + * the top bits are not all zero). + */ + if ((u64)(sval + 1) > 2) + return -ERANGE; + + return 0; +} + +enum aarch64_imm_type { + INSN_IMM_MOVNZ, + INSN_IMM_MOVK, + INSN_IMM_ADR, + INSN_IMM_26, + INSN_IMM_19, + INSN_IMM_16, + INSN_IMM_14, + INSN_IMM_12, + INSN_IMM_9, +}; + +static u32 encode_insn_immediate(enum aarch64_imm_type type, u32 insn, u64 imm) +{ + u32 immlo, immhi, lomask, himask, mask; + int shift; + + switch (type) { + case INSN_IMM_MOVNZ: + /* + * For signed MOVW relocations, we have to manipulate the + * instruction encoding depending on whether or not the + * immediate is less than zero. + */ + insn &= ~(3 << 29); + if ((s64)imm >= 0) { + /* >=0: Set the instruction to MOVZ (opcode 10b). */ + insn |= 2 << 29; + } else { + /* + * <0: Set the instruction to MOVN (opcode 00b). + * Since we've masked the opcode already, we + * don't need to do anything other than + * inverting the new immediate field. + */ + imm = ~imm; + } + case INSN_IMM_MOVK: + mask = BIT(16) - 1; + shift = 5; + break; + case INSN_IMM_ADR: + lomask = 0x3; + himask = 0x7ffff; + immlo = imm & lomask; + imm >>= 2; + immhi = imm & himask; + imm = (immlo << 24) | (immhi); + mask = (lomask << 24) | (himask); + shift = 5; + break; + case INSN_IMM_26: + mask = BIT(26) - 1; + shift = 0; + break; + case INSN_IMM_19: + mask = BIT(19) - 1; + shift = 5; + break; + case INSN_IMM_16: + mask = BIT(16) - 1; + shift = 5; + break; + case INSN_IMM_14: + mask = BIT(14) - 1; + shift = 5; + break; + case INSN_IMM_12: + mask = BIT(12) - 1; + shift = 10; + break; + case INSN_IMM_9: + mask = BIT(9) - 1; + shift = 12; + break; + default: + pr_err("encode_insn_immediate: unknown immediate encoding %d\n", + type); + return 0; + } + + /* Update the immediate field. 
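+ * As a concrete case: INSN_IMM_12 sets mask = 0xfff and shift = 10, so + * the two statements below clear bits [21:10] of the instruction word + * and deposit the new immediate there - the field used by ADD (immediate) + * and the unsigned-offset LDR/STR forms.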
*/ + insn &= ~(mask << shift); + insn |= (imm & mask) << shift; + + return insn; +} + +static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val, + int lsb, enum aarch64_imm_type imm_type) +{ + u64 imm, limit = 0; + s64 sval; + u32 insn = *(u32 *)place; + + sval = do_reloc(op, place, val); + sval >>= lsb; + imm = sval & 0xffff; + + /* Update the instruction with the new encoding. */ + *(u32 *)place = encode_insn_immediate(imm_type, insn, imm); + + /* Shift out the immediate field. */ + sval >>= 16; + + /* + * For unsigned immediates, the overflow check is straightforward. + * For signed immediates, the sign bit is actually the bit past the + * most significant bit of the field. + * The INSN_IMM_16 immediate type is unsigned. + */ + if (imm_type != INSN_IMM_16) { + sval++; + limit++; + } + + /* Check the upper bits depending on the sign of the immediate. */ + if ((u64)sval > limit) + return -ERANGE; + + return 0; +} + +static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val, + int lsb, int len, enum aarch64_imm_type imm_type) +{ + u64 imm, imm_mask; + s64 sval; + u32 insn = *(u32 *)place; + + /* Calculate the relocation value. */ + sval = do_reloc(op, place, val); + sval >>= lsb; + + /* Extract the value bits and shift them to bit 0. */ + imm_mask = (BIT(lsb + len) - 1) >> lsb; + imm = sval & imm_mask; + + /* Update the instruction's immediate field. */ + *(u32 *)place = encode_insn_immediate(imm_type, insn, imm); + + /* + * Extract the upper value bits (including the sign bit) and + * shift them to bit 0. + */ + sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1); + + /* + * Overflow has occurred if the upper bits are not all equal to + * the sign bit of the value. + */ + if ((u64)(sval + 1) >= 2) + return -ERANGE; + + return 0; +} + +int apply_relocate_add(Elf64_Shdr *sechdrs, + const char *strtab, + unsigned int symindex, + unsigned int relsec, + struct module *me) +{ + unsigned int i; + int ovf; + bool overflow_check; + Elf64_Sym *sym; + void *loc; + u64 val; + Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr; + + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* loc corresponds to P in the AArch64 ELF document. */ + loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + + rel[i].r_offset; + + /* sym is the ELF symbol we're referring to. */ + sym = (Elf64_Sym *)sechdrs[symindex].sh_addr + + ELF64_R_SYM(rel[i].r_info); + + /* val corresponds to (S + A) in the AArch64 ELF document. */ + val = sym->st_value + rel[i].r_addend; + + /* Check for overflow by default. */ + overflow_check = true; + + /* Perform the static relocation. */ + switch (ELF64_R_TYPE(rel[i].r_info)) { + /* Null relocations. */ + case R_ARM_NONE: + case R_AARCH64_NONE: + ovf = 0; + break; + + /* Data relocations. */ + case R_AARCH64_ABS64: + overflow_check = false; + ovf = reloc_data(RELOC_OP_ABS, loc, val, 64); + break; + case R_AARCH64_ABS32: + ovf = reloc_data(RELOC_OP_ABS, loc, val, 32); + break; + case R_AARCH64_ABS16: + ovf = reloc_data(RELOC_OP_ABS, loc, val, 16); + break; + case R_AARCH64_PREL64: + overflow_check = false; + ovf = reloc_data(RELOC_OP_PREL, loc, val, 64); + break; + case R_AARCH64_PREL32: + ovf = reloc_data(RELOC_OP_PREL, loc, val, 32); + break; + case R_AARCH64_PREL16: + ovf = reloc_data(RELOC_OP_PREL, loc, val, 16); + break; + + /* MOVW instruction relocations. 
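+ * These patch MOVZ/MOVK-style sequences that build a value 16 bits at a + * time: G0 carries bits [15:0], G1 bits [31:16], G2 bits [47:32] and G3 + * bits [63:48]. A toolchain typically materialises a 64-bit absolute + * address as one MOVZ followed by up to three MOVKs, with one relocation + * per instruction.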
*/ + case R_AARCH64_MOVW_UABS_G0_NC: + overflow_check = false; + case R_AARCH64_MOVW_UABS_G0: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, + INSN_IMM_16); + break; + case R_AARCH64_MOVW_UABS_G1_NC: + overflow_check = false; + case R_AARCH64_MOVW_UABS_G1: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, + INSN_IMM_16); + break; + case R_AARCH64_MOVW_UABS_G2_NC: + overflow_check = false; + case R_AARCH64_MOVW_UABS_G2: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, + INSN_IMM_16); + break; + case R_AARCH64_MOVW_UABS_G3: + /* We're using the top bits so we can't overflow. */ + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48, + INSN_IMM_16); + break; + case R_AARCH64_MOVW_SABS_G0: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0, + INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_SABS_G1: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16, + INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_SABS_G2: + ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32, + INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G0_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, + INSN_IMM_MOVK); + break; + case R_AARCH64_MOVW_PREL_G0: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0, + INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G1_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, + INSN_IMM_MOVK); + break; + case R_AARCH64_MOVW_PREL_G1: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16, + INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G2_NC: + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, + INSN_IMM_MOVK); + break; + case R_AARCH64_MOVW_PREL_G2: + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32, + INSN_IMM_MOVNZ); + break; + case R_AARCH64_MOVW_PREL_G3: + /* We're using the top bits so we can't overflow. */ + overflow_check = false; + ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48, + INSN_IMM_MOVNZ); + break; + + /* Immediate instruction relocations. 
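+ * The branch-style entries below shift the PC-relative offset right by + * two (lsb == 2) because A64 instructions are word-aligned; with a signed + * 26-bit field this gives R_AARCH64_JUMP26/CALL26 a reach of +/-128MB + * from the relocation site.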
*/ + case R_AARCH64_LD_PREL_LO19: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, + INSN_IMM_19); + break; + case R_AARCH64_ADR_PREL_LO21: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21, + INSN_IMM_ADR); + break; + case R_AARCH64_ADR_PREL_PG_HI21_NC: + overflow_check = false; + case R_AARCH64_ADR_PREL_PG_HI21: + ovf = reloc_insn_imm(RELOC_OP_PAGE, loc, val, 12, 21, + INSN_IMM_ADR); + break; + case R_AARCH64_ADD_ABS_LO12_NC: + case R_AARCH64_LDST8_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12, + INSN_IMM_12); + break; + case R_AARCH64_LDST16_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11, + INSN_IMM_12); + break; + case R_AARCH64_LDST32_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10, + INSN_IMM_12); + break; + case R_AARCH64_LDST64_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9, + INSN_IMM_12); + break; + case R_AARCH64_LDST128_ABS_LO12_NC: + overflow_check = false; + ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8, + INSN_IMM_12); + break; + case R_AARCH64_TSTBR14: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14, + INSN_IMM_14); + break; + case R_AARCH64_CONDBR19: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19, + INSN_IMM_19); + break; + case R_AARCH64_JUMP26: + case R_AARCH64_CALL26: + ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26, + INSN_IMM_26); + break; + + default: + pr_err("module %s: unsupported RELA relocation: %llu\n", + me->name, ELF64_R_TYPE(rel[i].r_info)); + return -ENOEXEC; + } + + if (overflow_check && ovf == -ERANGE) + goto overflow; + + } + + return 0; + +overflow: + pr_err("module %s: overflow in relocation type %d val %Lx\n", + me->name, (int)ELF64_R_TYPE(rel[i].r_info), val); + return -ENOEXEC; +} diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c new file mode 100644 index 000000000000..ecbf2d81ec5c --- /dev/null +++ b/arch/arm64/kernel/perf_event.c @@ -0,0 +1,1368 @@ +/* + * PMU support + * + * Copyright (C) 2012 ARM Limited + * Author: Will Deacon <will.deacon@arm.com> + * + * This code is based heavily on the ARMv7 perf event code. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#define pr_fmt(fmt) "hw perfevents: " fmt + +#include <linux/bitmap.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> + +#include <asm/cputype.h> +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/pmu.h> +#include <asm/stacktrace.h> + +/* + * ARMv8 supports a maximum of 32 events. + * The cycle counter is included in this total. 
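+ * In the index space used below, slot 0 is reserved for the dedicated + * cycle counter (ARMV8_IDX_CYCLE_COUNTER) and slots 1..31 are the + * generic event counters.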
+ */ +#define ARMPMU_MAX_HWEVENTS 32 + +static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); +static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); +static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); + +#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) + +/* Set at runtime when we know what CPU type we are. */ +static struct arm_pmu *cpu_pmu; + +int +armpmu_get_max_events(void) +{ + int max_events = 0; + + if (cpu_pmu != NULL) + max_events = cpu_pmu->num_events; + + return max_events; +} +EXPORT_SYMBOL_GPL(armpmu_get_max_events); + +int perf_num_counters(void) +{ + return armpmu_get_max_events(); +} +EXPORT_SYMBOL_GPL(perf_num_counters); + +#define HW_OP_UNSUPPORTED 0xFFFF + +#define C(_x) \ + PERF_COUNT_HW_CACHE_##_x + +#define CACHE_OP_UNSUPPORTED 0xFFFF + +static int +armpmu_map_cache_event(const unsigned (*cache_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u64 config) +{ + unsigned int cache_type, cache_op, cache_result, ret; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; + + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + + return ret; +} + +static int +armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) +{ + int mapping = (*event_map)[config]; + return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; +} + +static int +armpmu_map_raw_event(u32 raw_event_mask, u64 config) +{ + return (int)(config & raw_event_mask); +} + +static int map_cpu_event(struct perf_event *event, + const unsigned (*event_map)[PERF_COUNT_HW_MAX], + const unsigned (*cache_map) + [PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX], + u32 raw_event_mask) +{ + u64 config = event->attr.config; + + switch (event->attr.type) { + case PERF_TYPE_HARDWARE: + return armpmu_map_event(event_map, config); + case PERF_TYPE_HW_CACHE: + return armpmu_map_cache_event(cache_map, config); + case PERF_TYPE_RAW: + return armpmu_map_raw_event(raw_event_mask, config); + } + + return -ENOENT; +} + +int +armpmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + s64 left = local64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + local64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64)armpmu->max_period) + left = armpmu->max_period; + + local64_set(&hwc->prev_count, (u64)-left); + + armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +u64 +armpmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + u64 delta, prev_raw_count, new_raw_count; + +again: + prev_raw_count = local64_read(&hwc->prev_count); + new_raw_count = armpmu->read_counter(idx); + + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != 
prev_raw_count) + goto again; + + delta = (new_raw_count - prev_raw_count) & armpmu->max_period; + + local64_add(delta, &event->count); + local64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void +armpmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + armpmu_event_update(event, hwc, hwc->idx); +} + +static void +armpmu_stop(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * ARM pmu always has to update the counter, so ignore + * PERF_EF_UPDATE, see comments in armpmu_start(). + */ + if (!(hwc->state & PERF_HES_STOPPED)) { + armpmu->disable(hwc, hwc->idx); + barrier(); /* why? */ + armpmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static void +armpmu_start(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + + /* + * ARM pmu always has to reprogram the period, so ignore + * PERF_EF_RELOAD, see the comment below. + */ + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + /* + * Set the period again. Some counters can't be stopped, so when we + * were stopped we simply disabled the IRQ source and the counter + * may have been left counting. If we don't do this step then we may + * get an interrupt too soon or *way* too late if the overflow has + * happened since disabling. + */ + armpmu_event_set_period(event, hwc, hwc->idx); + armpmu->enable(hwc, hwc->idx); +} + +static void +armpmu_del(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *hw_events = armpmu->get_hw_events(); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0); + + armpmu_stop(event, PERF_EF_UPDATE); + hw_events->events[idx] = NULL; + clear_bit(idx, hw_events->used_mask); + + perf_event_update_userpage(event); +} + +static int +armpmu_add(struct perf_event *event, int flags) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct pmu_hw_events *hw_events = armpmu->get_hw_events(); + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + perf_pmu_disable(event->pmu); + + /* If we don't have a space for the counter then finish early. */ + idx = armpmu->get_event_idx(hw_events, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then make + * sure it is disabled. + */ + event->hw.idx = idx; + armpmu->disable(hwc, idx); + hw_events->events[idx] = event; + + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + armpmu_start(event, PERF_EF_RELOAD); + + /* Propagate our changes to the userspace mapping. 
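+ * Userspace may read the counter through the mmap'd perf event page + * without entering the kernel, so the page's index and offset fields + * must be refreshed once the event has been placed.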
*/ + perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; +} + +static int +validate_event(struct pmu_hw_events *hw_events, + struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event fake_event = event->hw; + struct pmu *leader_pmu = event->group_leader->pmu; + + if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) + return 1; + + return armpmu->get_event_idx(hw_events, &fake_event) >= 0; +} + +static int +validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct pmu_hw_events fake_pmu; + DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS); + + /* + * Initialise the fake PMU. We only need to populate the + * used_mask for the purposes of validation. + */ + memset(fake_used_mask, 0, sizeof(fake_used_mask)); + fake_pmu.used_mask = fake_used_mask; + + if (!validate_event(&fake_pmu, leader)) + return -EINVAL; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(&fake_pmu, sibling)) + return -EINVAL; + } + + if (!validate_event(&fake_pmu, event)) + return -EINVAL; + + return 0; +} + +static void +armpmu_release_hardware(struct arm_pmu *armpmu) +{ + int i, irq, irqs; + struct platform_device *pmu_device = armpmu->plat_device; + + irqs = min(pmu_device->num_resources, num_possible_cpus()); + + for (i = 0; i < irqs; ++i) { + if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) + continue; + irq = platform_get_irq(pmu_device, i); + if (irq >= 0) + free_irq(irq, armpmu); + } +} + +static int +armpmu_reserve_hardware(struct arm_pmu *armpmu) +{ + int i, err, irq, irqs; + struct platform_device *pmu_device = armpmu->plat_device; + + if (!pmu_device) { + pr_err("no PMU device registered\n"); + return -ENODEV; + } + + irqs = min(pmu_device->num_resources, num_possible_cpus()); + if (irqs < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + for (i = 0; i < irqs; ++i) { + err = 0; + irq = platform_get_irq(pmu_device, i); + if (irq < 0) + continue; + + /* + * If we have a single PMU interrupt that we can't shift, + * assume that we're running on a uniprocessor machine and + * continue. Otherwise, continue without this interrupt. 
+ */ + if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, i); + continue; + } + + err = request_irq(irq, armpmu->handle_irq, + IRQF_NOBALANCING, + "arm-pmu", armpmu); + if (err) { + pr_err("unable to request IRQ%d for ARM PMU counters\n", + irq); + armpmu_release_hardware(armpmu); + return err; + } + + cpumask_set_cpu(i, &armpmu->active_irqs); + } + + return 0; +} + +static void +hw_perf_event_destroy(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + atomic_t *active_events = &armpmu->active_events; + struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; + + if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { + armpmu_release_hardware(armpmu); + mutex_unlock(pmu_reserve_mutex); + } +} + +static int +event_requires_mode_exclusion(struct perf_event_attr *attr) +{ + return attr->exclude_idle || attr->exclude_user || + attr->exclude_kernel || attr->exclude_hv; +} + +static int +__hw_perf_event_init(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct hw_perf_event *hwc = &event->hw; + int mapping, err; + + mapping = armpmu->map_event(event); + + if (mapping < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapping; + } + + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. For SMP systems, each core has its own PMU so we can't do any + * clever allocation or constraints checking at this point. + */ + hwc->idx = -1; + hwc->config_base = 0; + hwc->config = 0; + hwc->event_base = 0; + + /* + * Check whether we need to exclude the counter from certain modes. + */ + if ((!armpmu->set_event_filter || + armpmu->set_event_filter(hwc, &event->attr)) && + event_requires_mode_exclusion(&event->attr)) { + pr_debug("ARM performance counters do not support mode exclusion\n"); + return -EPERM; + } + + /* + * Store the event encoding into the config_base field. + */ + hwc->config_base |= (unsigned long)mapping; + + if (!hwc->sample_period) { + /* + * For non-sampling runs, limit the sample_period to half + * of the counter width. That way, the new counter value + * is far less likely to overtake the previous one unless + * you have some serious IRQ latency issues.
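+ * E.g. with 32-bit counters, max_period is 0xffffffff, so a + * non-sampling event starts roughly 2^31 counts below overflow + * and a late overflow interrupt cannot plausibly see the counter + * lap its previous value.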
+ */ + hwc->sample_period = armpmu->max_period >> 1; + hwc->last_period = hwc->sample_period; + local64_set(&hwc->period_left, hwc->sample_period); + } + + err = 0; + if (event->group_leader != event) { + err = validate_group(event); + if (err) + return -EINVAL; + } + + return err; +} + +static int armpmu_event_init(struct perf_event *event) +{ + struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + int err = 0; + atomic_t *active_events = &armpmu->active_events; + + if (armpmu->map_event(event) == -ENOENT) + return -ENOENT; + + event->destroy = hw_perf_event_destroy; + + if (!atomic_inc_not_zero(active_events)) { + mutex_lock(&armpmu->reserve_mutex); + if (atomic_read(active_events) == 0) + err = armpmu_reserve_hardware(armpmu); + + if (!err) + atomic_inc(active_events); + mutex_unlock(&armpmu->reserve_mutex); + } + + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static void armpmu_enable(struct pmu *pmu) +{ + struct arm_pmu *armpmu = to_arm_pmu(pmu); + struct pmu_hw_events *hw_events = armpmu->get_hw_events(); + int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); + + if (enabled) + armpmu->start(); +} + +static void armpmu_disable(struct pmu *pmu) +{ + struct arm_pmu *armpmu = to_arm_pmu(pmu); + armpmu->stop(); +} + +static void __init armpmu_init(struct arm_pmu *armpmu) +{ + atomic_set(&armpmu->active_events, 0); + mutex_init(&armpmu->reserve_mutex); + + armpmu->pmu = (struct pmu) { + .pmu_enable = armpmu_enable, + .pmu_disable = armpmu_disable, + .event_init = armpmu_event_init, + .add = armpmu_add, + .del = armpmu_del, + .start = armpmu_start, + .stop = armpmu_stop, + .read = armpmu_read, + }; +} + +int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) +{ + armpmu_init(armpmu); + return perf_pmu_register(&armpmu->pmu, name, type); +} + +/* + * ARMv8 PMUv3 Performance Events handling code. + * Common event types. + */ +enum armv8_pmuv3_perf_types { + /* Required events. */ + ARMV8_PMUV3_PERFCTR_PMNC_SW_INCR = 0x00, + ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL = 0x03, + ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS = 0x04, + ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, + ARMV8_PMUV3_PERFCTR_CLOCK_CYCLES = 0x11, + ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED = 0x12, + + /* At least one of the following is required. */ + ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED = 0x08, + ARMV8_PMUV3_PERFCTR_OP_SPEC = 0x1B, + + /* Common architectural events. */ + ARMV8_PMUV3_PERFCTR_MEM_READ = 0x06, + ARMV8_PMUV3_PERFCTR_MEM_WRITE = 0x07, + ARMV8_PMUV3_PERFCTR_EXC_TAKEN = 0x09, + ARMV8_PMUV3_PERFCTR_EXC_EXECUTED = 0x0A, + ARMV8_PMUV3_PERFCTR_CID_WRITE = 0x0B, + ARMV8_PMUV3_PERFCTR_PC_WRITE = 0x0C, + ARMV8_PMUV3_PERFCTR_PC_IMM_BRANCH = 0x0D, + ARMV8_PMUV3_PERFCTR_PC_PROC_RETURN = 0x0E, + ARMV8_PMUV3_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F, + ARMV8_PMUV3_PERFCTR_TTBR_WRITE = 0x1C, + + /* Common microarchitectural events. */ + ARMV8_PMUV3_PERFCTR_L1_ICACHE_REFILL = 0x01, + ARMV8_PMUV3_PERFCTR_ITLB_REFILL = 0x02, + ARMV8_PMUV3_PERFCTR_DTLB_REFILL = 0x05, + ARMV8_PMUV3_PERFCTR_MEM_ACCESS = 0x13, + ARMV8_PMUV3_PERFCTR_L1_ICACHE_ACCESS = 0x14, + ARMV8_PMUV3_PERFCTR_L1_DCACHE_WB = 0x15, + ARMV8_PMUV3_PERFCTR_L2_CACHE_ACCESS = 0x16, + ARMV8_PMUV3_PERFCTR_L2_CACHE_REFILL = 0x17, + ARMV8_PMUV3_PERFCTR_L2_CACHE_WB = 0x18, + ARMV8_PMUV3_PERFCTR_BUS_ACCESS = 0x19, + ARMV8_PMUV3_PERFCTR_MEM_ERROR = 0x1A, + ARMV8_PMUV3_PERFCTR_BUS_CYCLES = 0x1D, + + /* + * This isn't an architected event. 
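+ * It is a software convention inherited from the ARMv7 perf code, where + * 0xFF plays the same role; the event map has no other way to say "use + * the dedicated cycle counter".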
+ * We detect this event number and use the cycle counter instead. + */ + ARMV8_PMUV3_PERFCTR_CPU_CYCLES = 0xFF, +}; + +/* PMUv3 HW events mapping. */ +static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV8_PMUV3_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV8_PMUV3_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_L1_DCACHE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] = ARMV8_PMUV3_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = 
CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +/* + * Perf Events' indices + */ +#define ARMV8_IDX_CYCLE_COUNTER 0 +#define ARMV8_IDX_COUNTER0 1 +#define ARMV8_IDX_COUNTER_LAST (ARMV8_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) + +#define ARMV8_MAX_COUNTERS 32 +#define ARMV8_COUNTER_MASK (ARMV8_MAX_COUNTERS - 1) + +/* + * ARMv8 low level PMU access + */ + +/* + * Perf Event to low level counters mapping + */ +#define ARMV8_IDX_TO_COUNTER(x) \ + (((x) - ARMV8_IDX_COUNTER0) & ARMV8_COUNTER_MASK) + +/* + * Per-CPU PMCR: config reg + */ +#define ARMV8_PMCR_E (1 << 0) /* Enable all counters */ +#define ARMV8_PMCR_P (1 << 1) /* Reset all counters */ +#define ARMV8_PMCR_C (1 << 2) /* Cycle counter reset */ +#define ARMV8_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ +#define ARMV8_PMCR_X (1 << 4) /* Export to ETM */ +#define ARMV8_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +#define ARMV8_PMCR_N_SHIFT 11 /* Number of counters supported */ +#define ARMV8_PMCR_N_MASK 0x1f +#define ARMV8_PMCR_MASK 0x3f /* Mask for writable bits */ + +/* + * PMOVSR: counters overflow flag status reg + */ +#define ARMV8_OVSR_MASK 0xffffffff /* Mask for writable bits */ +#define ARMV8_OVERFLOWED_MASK ARMV8_OVSR_MASK + +/* + * PMXEVTYPER: Event selection reg + */ +#define ARMV8_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ +#define ARMV8_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ + +/* + * Event filters for PMUv3 + */ +#define ARMV8_EXCLUDE_EL1 (1 << 31) +#define ARMV8_EXCLUDE_EL0 (1 << 30) +#define ARMV8_INCLUDE_EL2 (1 << 27) + +static inline u32 armv8pmu_pmcr_read(void) +{ + u32 val; + asm volatile("mrs %0, pmcr_el0" : "=r" (val)); + return val; +} + +static inline void armv8pmu_pmcr_write(u32 val) +{ + val &= ARMV8_PMCR_MASK; + isb(); + asm volatile("msr pmcr_el0, %0" :: "r" (val)); +} + +static inline int armv8pmu_has_overflowed(u32 pmovsr) +{ + return pmovsr & ARMV8_OVERFLOWED_MASK; +} + +static inline int armv8pmu_counter_valid(int idx) +{ + return idx >= ARMV8_IDX_CYCLE_COUNTER && idx <= ARMV8_IDX_COUNTER_LAST; +} + +static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx) +{ + int ret = 0; + u32 counter; + + if (!armv8pmu_counter_valid(idx)) { + pr_err("CPU%u checking wrong counter %d overflow status\n", + smp_processor_id(), idx); + } else { + counter = ARMV8_IDX_TO_COUNTER(idx); + ret = pmnc & BIT(counter); + } + + return ret; +} + +static inline int armv8pmu_select_counter(int idx) +{ + u32 counter; + + if (!armv8pmu_counter_valid(idx)) { + pr_err("CPU%u selecting wrong PMNC counter %d\n", + smp_processor_id(), idx); + return -EINVAL; + } + + counter = ARMV8_IDX_TO_COUNTER(idx); + asm volatile("msr pmselr_el0, %0" :: "r" (counter)); + isb(); + + return idx; +} + +static inline u32 armv8pmu_read_counter(int idx) +{ + u32 value = 0; + + if (!armv8pmu_counter_valid(idx)) + pr_err("CPU%u reading wrong counter %d\n", + smp_processor_id(), idx); + else if (idx == ARMV8_IDX_CYCLE_COUNTER) + asm volatile("mrs %0, pmccntr_el0" : "=r" (value)); + else if (armv8pmu_select_counter(idx) == idx) + asm volatile("mrs %0, pmxevcntr_el0" : "=r" (value)); + + return value; +} + +static inline void armv8pmu_write_counter(int idx, u32 value) +{ + if (!armv8pmu_counter_valid(idx)) + pr_err("CPU%u writing wrong counter %d\n", + smp_processor_id(), idx); + else if (idx == ARMV8_IDX_CYCLE_COUNTER) + asm volatile("msr pmccntr_el0, %0" :: "r" (value)); + else if (armv8pmu_select_counter(idx) == idx) + asm volatile("msr pmxevcntr_el0, %0" :: "r" (value)); +} + 
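+/* + * The pmxev* accessors below are indirect: armv8pmu_select_counter() first + * writes the counter index to PMSELR_EL0 (followed by an isb() so the + * selection takes effect), after which PMXEVTYPER_EL0/PMXEVCNTR_EL0 access + * that counter's event type and value. A minimal sketch of programming + * counter idx to count event ev (the real code below also masks ev with + * ARMV8_EVTYPE_MASK): + * + * if (armv8pmu_select_counter(idx) == idx) + * asm volatile("msr pmxevtyper_el0, %0" :: "r" (ev)); + */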
+static inline void armv8pmu_write_evtype(int idx, u32 val) +{ + if (armv8pmu_select_counter(idx) == idx) { + val &= ARMV8_EVTYPE_MASK; + asm volatile("msr pmxevtyper_el0, %0" :: "r" (val)); + } +} + +static inline int armv8pmu_enable_counter(int idx) +{ + u32 counter; + + if (!armv8pmu_counter_valid(idx)) { + pr_err("CPU%u enabling wrong PMNC counter %d\n", + smp_processor_id(), idx); + return -EINVAL; + } + + counter = ARMV8_IDX_TO_COUNTER(idx); + asm volatile("msr pmcntenset_el0, %0" :: "r" (BIT(counter))); + return idx; +} + +static inline int armv8pmu_disable_counter(int idx) +{ + u32 counter; + + if (!armv8pmu_counter_valid(idx)) { + pr_err("CPU%u disabling wrong PMNC counter %d\n", + smp_processor_id(), idx); + return -EINVAL; + } + + counter = ARMV8_IDX_TO_COUNTER(idx); + asm volatile("msr pmcntenclr_el0, %0" :: "r" (BIT(counter))); + return idx; +} + +static inline int armv8pmu_enable_intens(int idx) +{ + u32 counter; + + if (!armv8pmu_counter_valid(idx)) { + pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", + smp_processor_id(), idx); + return -EINVAL; + } + + counter = ARMV8_IDX_TO_COUNTER(idx); + asm volatile("msr pmintenset_el1, %0" :: "r" (BIT(counter))); + return idx; +} + +static inline int armv8pmu_disable_intens(int idx) +{ + u32 counter; + + if (!armv8pmu_counter_valid(idx)) { + pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", + smp_processor_id(), idx); + return -EINVAL; + } + + counter = ARMV8_IDX_TO_COUNTER(idx); + asm volatile("msr pmintenclr_el1, %0" :: "r" (BIT(counter))); + isb(); + /* Clear the overflow flag in case an interrupt is pending. */ + asm volatile("msr pmovsclr_el0, %0" :: "r" (BIT(counter))); + isb(); + return idx; +} + +static inline u32 armv8pmu_getreset_flags(void) +{ + u32 value; + + /* Read */ + asm volatile("mrs %0, pmovsclr_el0" : "=r" (value)); + + /* Write to clear flags */ + value &= ARMV8_OVSR_MASK; + asm volatile("msr pmovsclr_el0, %0" :: "r" (value)); + + return value; +} + +static void armv8pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + + /* + * Enable counter and interrupt, and set the counter to count + * the event that we're interested in. + */ + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + /* + * Disable counter + */ + armv8pmu_disable_counter(idx); + + /* + * Set event (if destined for PMNx counters). + */ + armv8pmu_write_evtype(idx, hwc->config_base); + + /* + * Enable interrupt for this counter + */ + armv8pmu_enable_intens(idx); + + /* + * Enable counter + */ + armv8pmu_enable_counter(idx); + + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static void armv8pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + + /* + * Disable counter and interrupt + */ + raw_spin_lock_irqsave(&events->pmu_lock, flags); + + /* + * Disable counter + */ + armv8pmu_disable_counter(idx); + + /* + * Disable interrupt for this counter + */ + armv8pmu_disable_intens(idx); + + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev) +{ + u32 pmovsr; + struct perf_sample_data data; + struct pmu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + /* + * Get and reset the IRQ flags + */ + pmovsr = armv8pmu_getreset_flags(); + + /* + * Did an overflow occur? 
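+	 * If no bit is set in PMOVSR, the overflow did not come from this
+	 * PMU; the interrupt line may be shared, so return IRQ_NONE below.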
+ */ + if (!armv8pmu_has_overflowed(pmovsr)) + return IRQ_NONE; + + /* + * Handle the counter(s) overflow(s) + */ + regs = get_irq_regs(); + + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx < cpu_pmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + /* Ignore if we don't have an event. */ + if (!event) + continue; + + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!armv8pmu_counter_has_overflowed(pmovsr, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event, hwc, idx); + perf_sample_data_init(&data, 0, hwc->last_period); + if (!armpmu_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, &data, regs)) + cpu_pmu->disable(hwc, idx); + } + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts disabled. For + * platforms that can have the PMU interrupts raised as an NMI, this + * will not work. + */ + irq_work_run(); + + return IRQ_HANDLED; +} + +static void armv8pmu_start(void) +{ + unsigned long flags; + struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + /* Enable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMCR_E); + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static void armv8pmu_stop(void) +{ + unsigned long flags; + struct pmu_hw_events *events = cpu_pmu->get_hw_events(); + + raw_spin_lock_irqsave(&events->pmu_lock, flags); + /* Disable all counters */ + armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMCR_E); + raw_spin_unlock_irqrestore(&events->pmu_lock, flags); +} + +static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc, + struct hw_perf_event *event) +{ + int idx; + unsigned long evtype = event->config_base & ARMV8_EVTYPE_EVENT; + + /* Always place a cycle counter into the cycle counter. */ + if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) { + if (test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return ARMV8_IDX_CYCLE_COUNTER; + } + + /* + * For anything other than a cycle counter, try and use + * the events counters + */ + for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { + if (!test_and_set_bit(idx, cpuc->used_mask)) + return idx; + } + + /* The counters are all in use. */ + return -EAGAIN; +} + +/* + * Add an event filter to a given event. This will only work for PMUv2 PMUs. + */ +static int armv8pmu_set_event_filter(struct hw_perf_event *event, + struct perf_event_attr *attr) +{ + unsigned long config_base = 0; + + if (attr->exclude_idle) + return -EPERM; + if (attr->exclude_user) + config_base |= ARMV8_EXCLUDE_EL0; + if (attr->exclude_kernel) + config_base |= ARMV8_EXCLUDE_EL1; + if (!attr->exclude_hv) + config_base |= ARMV8_INCLUDE_EL2; + + /* + * Install the filter into config_base as this is used to + * construct the event type. + */ + event->config_base = config_base; + + return 0; +} + +static void armv8pmu_reset(void *info) +{ + u32 idx, nb_cnt = cpu_pmu->num_events; + + /* The counter and interrupt enable registers are unknown at reset. */ + for (idx = ARMV8_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) + armv8pmu_disable_event(NULL, idx); + + /* Initialize & Reset PMNC: C and P bits. */ + armv8pmu_pmcr_write(ARMV8_PMCR_P | ARMV8_PMCR_C); + + /* Disable access from userspace. 
*/ + asm volatile("msr pmuserenr_el0, %0" :: "r" (0)); +} + +static int armv8_pmuv3_map_event(struct perf_event *event) +{ + return map_cpu_event(event, &armv8_pmuv3_perf_map, + &armv8_pmuv3_perf_cache_map, 0xFF); +} + +static struct arm_pmu armv8pmu = { + .handle_irq = armv8pmu_handle_irq, + .enable = armv8pmu_enable_event, + .disable = armv8pmu_disable_event, + .read_counter = armv8pmu_read_counter, + .write_counter = armv8pmu_write_counter, + .get_event_idx = armv8pmu_get_event_idx, + .start = armv8pmu_start, + .stop = armv8pmu_stop, + .reset = armv8pmu_reset, + .max_period = (1LLU << 32) - 1, +}; + +static u32 __init armv8pmu_read_num_pmnc_events(void) +{ + u32 nb_cnt; + + /* Read the nb of CNTx counters supported from PMNC */ + nb_cnt = (armv8pmu_pmcr_read() >> ARMV8_PMCR_N_SHIFT) & ARMV8_PMCR_N_MASK; + + /* Add the CPU cycles counter and return */ + return nb_cnt + 1; +} + +static struct arm_pmu *__init armv8_pmuv3_pmu_init(void) +{ + armv8pmu.name = "arm/armv8-pmuv3"; + armv8pmu.map_event = armv8_pmuv3_map_event; + armv8pmu.num_events = armv8pmu_read_num_pmnc_events(); + armv8pmu.set_event_filter = armv8pmu_set_event_filter; + return &armv8pmu; +} + +/* + * Ensure the PMU has sane values out of reset. + * This requires SMP to be available, so exists as a separate initcall. + */ +static int __init +cpu_pmu_reset(void) +{ + if (cpu_pmu && cpu_pmu->reset) + return on_each_cpu(cpu_pmu->reset, NULL, 1); + return 0; +} +arch_initcall(cpu_pmu_reset); + +/* + * PMU platform driver and devicetree bindings. + */ +static struct of_device_id armpmu_of_device_ids[] = { + {.compatible = "arm,armv8-pmuv3"}, + {}, +}; + +static int __devinit armpmu_device_probe(struct platform_device *pdev) +{ + if (!cpu_pmu) + return -ENODEV; + + cpu_pmu->plat_device = pdev; + return 0; +} + +static struct platform_driver armpmu_driver = { + .driver = { + .name = "arm-pmu", + .of_match_table = armpmu_of_device_ids, + }, + .probe = armpmu_device_probe, +}; + +static int __init register_pmu_driver(void) +{ + return platform_driver_register(&armpmu_driver); +} +device_initcall(register_pmu_driver); + +static struct pmu_hw_events *armpmu_get_cpu_events(void) +{ + return &__get_cpu_var(cpu_hw_events); +} + +static void __init cpu_pmu_init(struct arm_pmu *armpmu) +{ + int cpu; + for_each_possible_cpu(cpu) { + struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); + events->events = per_cpu(hw_events, cpu); + events->used_mask = per_cpu(used_mask, cpu); + raw_spin_lock_init(&events->pmu_lock); + } + armpmu->get_hw_events = armpmu_get_cpu_events; +} + +static int __init init_hw_perf_events(void) +{ + u64 dfr = read_cpuid(ID_AA64DFR0_EL1); + + switch ((dfr >> 8) & 0xf) { + case 0x1: /* PMUv3 */ + cpu_pmu = armv8_pmuv3_pmu_init(); + break; + } + + if (cpu_pmu) { + pr_info("enabled with %s PMU driver, %d counters available\n", + cpu_pmu->name, cpu_pmu->num_events); + cpu_pmu_init(cpu_pmu); + armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); + } else { + pr_info("no hardware support available\n"); + } + + return 0; +} +early_initcall(init_hw_perf_events); + +/* + * Callchain handling code. + */ +struct frame_tail { + struct frame_tail __user *fp; + unsigned long lr; +} __attribute__((packed)); + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. 
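+ * An AArch64 frame record is the (fp, lr) pair a function saves at its
+ * frame pointer, so each tail points at the caller's saved x29/x30 pair.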
+ */
+static struct frame_tail __user *
+user_backtrace(struct frame_tail __user *tail,
+	       struct perf_callchain_entry *entry)
+{
+	struct frame_tail buftail;
+	unsigned long err;
+
+	/* Also check accessibility of one struct frame_tail beyond */
+	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
+		return NULL;
+
+	pagefault_disable();
+	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
+	pagefault_enable();
+
+	if (err)
+		return NULL;
+
+	perf_callchain_store(entry, buftail.lr);
+
+	/*
+	 * Frame pointers should strictly progress back up the stack
+	 * (towards higher addresses).
+	 */
+	if (tail >= buftail.fp)
+		return NULL;
+
+	return buftail.fp;
+}
+
+void perf_callchain_user(struct perf_callchain_entry *entry,
+			 struct pt_regs *regs)
+{
+	struct frame_tail __user *tail;
+
+	tail = (struct frame_tail __user *)regs->regs[29];
+
+	while (entry->nr < PERF_MAX_STACK_DEPTH &&
+	       tail && !((unsigned long)tail & 0xf))
+		tail = user_backtrace(tail, entry);
+}
+
+/*
+ * Gets called by walk_stackframe() for every stackframe. This will be called
+ * whilst unwinding the stackframe and is like a subroutine return, so we use
+ * the PC.
+ */
+static int callchain_trace(struct stackframe *frame, void *data)
+{
+	struct perf_callchain_entry *entry = data;
+	perf_callchain_store(entry, frame->pc);
+	return 0;
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry *entry,
+			   struct pt_regs *regs)
+{
+	struct stackframe frame;
+
+	frame.fp = regs->regs[29];
+	frame.sp = regs->sp;
+	frame.pc = regs->pc;
+	walk_stackframe(&frame, callchain_trace, entry);
+}
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
new file mode 100644
index 000000000000..f22965ea1cfc
--- /dev/null
+++ b/arch/arm64/kernel/process.c
@@ -0,0 +1,408 @@
+/*
+ * Based on arch/arm/kernel/process.c
+ *
+ * Original Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1996-2000 Russell King - Converted to ARM.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <stdarg.h>
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/user.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+#include <linux/kallsyms.h>
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/elfcore.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/utsname.h>
+#include <linux/uaccess.h>
+#include <linux/random.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/personality.h>
+#include <linux/notifier.h>
+
+#include <asm/compat.h>
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
+#include <asm/fpsimd.h>
+
+static void setup_restart(void)
+{
+	/*
+	 * Tell the mm system that we are going to reboot -
+	 * we may need it to insert some 1:1 mappings so that
+	 * soft boot works.
+	 */
+	setup_mm_for_reboot();
+
+	/* Clean and invalidate caches */
+	flush_cache_all();
+
+	/* Turn D-cache off */
+	cpu_cache_off();
+
+	/* Push out any further dirty data, and ensure cache is empty */
+	flush_cache_all();
+}
+
+void soft_restart(unsigned long addr)
+{
+	setup_restart();
+	cpu_reset(addr);
+}
+
+/*
+ * Function pointers to optional machine specific functions
+ */
+void (*pm_power_off)(void);
+EXPORT_SYMBOL_GPL(pm_power_off);
+
+void (*pm_restart)(const char *cmd);
+EXPORT_SYMBOL_GPL(pm_restart);
+
+
+/*
+ * This is our default idle handler.
+ */
+static void default_idle(void)
+{
+	/*
+	 * This should do all the clock switching and wait for interrupt
+	 * tricks.
+	 */
+	cpu_do_idle();
+	local_irq_enable();
+}
+
+void (*pm_idle)(void) = default_idle;
+EXPORT_SYMBOL_GPL(pm_idle);
+
+/*
+ * The idle thread has rather strange semantics for calling pm_idle, but
+ * this is what x86 does and we need to do the same, so that things like
+ * cpuidle get called in the same way.
+ */
+void cpu_idle(void)
+{
+	local_fiq_enable();
+
+	/* endless idle loop with no priority at all */
+	while (1) {
+		tick_nohz_idle_enter();
+		rcu_idle_enter();
+		while (!need_resched()) {
+			/*
+			 * We need to disable interrupts here to ensure
+			 * we don't miss a wakeup call.
+			 */
+			local_irq_disable();
+			if (!need_resched()) {
+				stop_critical_timings();
+				pm_idle();
+				start_critical_timings();
+				/*
+				 * pm_idle functions should always return
+				 * with IRQs enabled.
+				 */
+				WARN_ON(irqs_disabled());
+			} else {
+				local_irq_enable();
+			}
+		}
+		rcu_idle_exit();
+		tick_nohz_idle_exit();
+		schedule_preempt_disabled();
+	}
+}
+
+void machine_shutdown(void)
+{
+#ifdef CONFIG_SMP
+	smp_send_stop();
+#endif
+}
+
+void machine_halt(void)
+{
+	machine_shutdown();
+	while (1);
+}
+
+void machine_power_off(void)
+{
+	machine_shutdown();
+	if (pm_power_off)
+		pm_power_off();
+}
+
+void machine_restart(char *cmd)
+{
+	machine_shutdown();
+
+	/* Disable interrupts first */
+	local_irq_disable();
+	local_fiq_disable();
+
+	/* Now call the architecture specific reboot code. */
+	if (pm_restart)
+		pm_restart(cmd);
+
+	/*
+	 * Whoops - the architecture was unable to reboot.
+	 */
+	printk("Reboot failed -- System halted\n");
+	while (1);
+}
+
+void __show_regs(struct pt_regs *regs)
+{
+	int i;
+
+	printk("CPU: %d %s (%s %.*s)\n",
+		raw_smp_processor_id(), print_tainted(),
+		init_utsname()->release,
+		(int)strcspn(init_utsname()->version, " "),
+		init_utsname()->version);
+	print_symbol("PC is at %s\n", instruction_pointer(regs));
+	print_symbol("LR is at %s\n", regs->regs[30]);
+	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
+	       regs->pc, regs->regs[30], regs->pstate);
+	printk("sp : %016llx\n", regs->sp);
+	for (i = 29; i >= 0; i--) {
+		printk("x%-2d: %016llx ", i, regs->regs[i]);
+		if (i % 2 == 0)
+			printk("\n");
+	}
+	printk("\n");
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	printk("\n");
+	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
+	__show_regs(regs);
+}
+
+/*
+ * Free current thread data structures etc.
+ */
+void exit_thread(void)
+{
+}
+
+void flush_thread(void)
+{
+	fpsimd_flush_thread();
+	flush_ptrace_hw_breakpoint(current);
+}
+
+void release_thread(struct task_struct *dead_task)
+{
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	fpsimd_save_state(&current->thread.fpsimd_state);
+	*dst = *src;
+	return 0;
+}
+
+asmlinkage void ret_from_fork(void) asm("ret_from_fork");
+
+int copy_thread(unsigned long clone_flags, unsigned long stack_start,
+		unsigned long stk_sz, struct task_struct *p,
+		struct pt_regs *regs)
+{
+	struct pt_regs *childregs = task_pt_regs(p);
+	unsigned long tls = p->thread.tp_value;
+
+	*childregs = *regs;
+	childregs->regs[0] = 0;
+
+	if (is_compat_thread(task_thread_info(p)))
+		childregs->compat_sp = stack_start;
+	else {
+		/*
+		 * Read the current TLS pointer from tpidr_el0 as it may be
+		 * out-of-sync with the saved value.
+		 */
+		asm("mrs %0, tpidr_el0" : "=r" (tls));
+		childregs->sp = stack_start;
+	}
+
+	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
+	p->thread.cpu_context.sp = (unsigned long)childregs;
+	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
+
+	/* If a TLS pointer was passed to clone, use that for the new thread. */
+	if (clone_flags & CLONE_SETTLS)
+		tls = regs->regs[3];
+	p->thread.tp_value = tls;
+
+	ptrace_hw_copy_thread(p);
+
+	return 0;
+}
+
+/*
+ * Native threads keep their TLS pointer in tpidr_el0, while compat
+ * (AArch32) threads see it in the read-only tpidrro_el0; the unused
+ * register is cleared on each switch.
+ */
+static void tls_thread_switch(struct task_struct *next)
+{
+	unsigned long tpidr, tpidrro;
+
+	if (!is_compat_task()) {
+		asm("mrs %0, tpidr_el0" : "=r" (tpidr));
+		current->thread.tp_value = tpidr;
+	}
+
+	if (is_compat_thread(task_thread_info(next))) {
+		tpidr = 0;
+		tpidrro = next->thread.tp_value;
+	} else {
+		tpidr = next->thread.tp_value;
+		tpidrro = 0;
+	}
+
+	asm(
+	"	msr	tpidr_el0, %0\n"
+	"	msr	tpidrro_el0, %1"
+	: : "r" (tpidr), "r" (tpidrro));
+}
+
+/*
+ * Thread switching.
+ */
+struct task_struct *__switch_to(struct task_struct *prev,
+				struct task_struct *next)
+{
+	struct task_struct *last;
+
+	fpsimd_thread_switch(next);
+	tls_thread_switch(next);
+	hw_breakpoint_thread_switch(next);
+
+	/* the actual thread switch */
+	last = cpu_switch_to(prev, next);
+
+	return last;
+}
+
+/*
+ * Fill in the task's elfregs structure for a core dump.
+ */
+int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
+{
+	elf_core_copy_regs(elfregs, task_pt_regs(t));
+	return 1;
+}
+
+/*
+ * Fill in the fpe structure for a core dump.
+ */
+int dump_fpu(struct pt_regs *regs, struct user_fp *fp)
+{
+	return 0;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+/*
+ * Shuffle the argument into the correct register before calling the
+ * thread function. x1 is the thread argument, x2 is the pointer to
+ * the thread function, and x3 points to the exit function.
+ */
+extern void kernel_thread_helper(void);
+asm(	".section .text\n"
+"	.align\n"
+"	.type	kernel_thread_helper, #function\n"
+"kernel_thread_helper:\n"
+"	mov	x0, x1\n"
+"	mov	x30, x3\n"
+"	br	x2\n"
+"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
+"	.previous");
+
+#define kernel_thread_exit	do_exit
+
+/*
+ * Create a kernel thread.
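+ * The new thread starts in kernel_thread_helper with the pt_regs set up
+ * below, so fn(arg) runs with x30 pointing at do_exit() and termination
+ * is just a function return.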
+ */
+pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+	struct pt_regs regs;
+
+	memset(&regs, 0, sizeof(regs));
+
+	regs.regs[1] = (unsigned long)arg;
+	regs.regs[2] = (unsigned long)fn;
+	regs.regs[3] = (unsigned long)kernel_thread_exit;
+	regs.pc = (unsigned long)kernel_thread_helper;
+	regs.pstate = PSR_MODE_EL1h;
+
+	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
+}
+EXPORT_SYMBOL(kernel_thread);
+
+unsigned long get_wchan(struct task_struct *p)
+{
+	struct stackframe frame;
+	int count = 0;
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	frame.fp = thread_saved_fp(p);
+	frame.sp = thread_saved_sp(p);
+	frame.pc = thread_saved_pc(p);
+	do {
+		int ret = unwind_frame(&frame);
+		if (ret < 0)
+			return 0;
+		if (!in_sched_functions(frame.pc))
+			return frame.pc;
+	} while (count++ < 16);
+	return 0;
+}
+
+unsigned long arch_align_stack(unsigned long sp)
+{
+	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+		sp -= get_random_int() & ~PAGE_MASK;
+	return sp & ~0xf;
+}
+
+static unsigned long randomize_base(unsigned long base)
+{
+	unsigned long range_end = base + (STACK_RND_MASK << PAGE_SHIFT) + 1;
+	return randomize_range(base, range_end, 0) ? : base;
+}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+	return randomize_base(mm->brk);
+}
+
+unsigned long randomize_et_dyn(unsigned long base)
+{
+	return randomize_base(base);
+}
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
new file mode 100644
index 000000000000..ac3550ecc7b5
--- /dev/null
+++ b/arch/arm64/kernel/ptrace.c
@@ -0,0 +1,1126 @@
+/*
+ * Based on arch/arm/kernel/ptrace.c
+ *
+ * By Ross Biro 1/23/92
+ * edited by Linus Torvalds
+ * ARM modifications Copyright (C) 2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/security.h>
+#include <linux/init.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/perf_event.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/regset.h>
+#include <linux/tracehook.h>
+#include <linux/elf.h>
+
+#include <asm/compat.h>
+#include <asm/debug-monitors.h>
+#include <asm/pgtable.h>
+#include <asm/traps.h>
+#include <asm/system_misc.h>
+
+/*
+ * TODO: does not yet catch signals sent when the child dies,
+ * in exit.c or in signal.c.
+ */
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+}
+
+/*
+ * Handle hitting a breakpoint.
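+ * A BRK instruction faults with DBG_ESR_EVT_BRK and reaches this handler
+ * through the hook installed by ptrace_break_init() below; the tracee
+ * gets a SIGTRAP with si_code TRAP_BRKPT.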
+ */ +static int ptrace_break(struct pt_regs *regs) +{ + siginfo_t info = { + .si_signo = SIGTRAP, + .si_errno = 0, + .si_code = TRAP_BRKPT, + .si_addr = (void __user *)instruction_pointer(regs), + }; + + force_sig_info(SIGTRAP, &info, current); + return 0; +} + +static int arm64_break_trap(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + return ptrace_break(regs); +} + +#ifdef CONFIG_HAVE_HW_BREAKPOINT +/* + * Handle hitting a HW-breakpoint. + */ +static void ptrace_hbptriggered(struct perf_event *bp, + struct perf_sample_data *data, + struct pt_regs *regs) +{ + struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp); + siginfo_t info = { + .si_signo = SIGTRAP, + .si_errno = 0, + .si_code = TRAP_HWBKPT, + .si_addr = (void __user *)(bkpt->trigger), + }; + +#ifdef CONFIG_COMPAT + int i; + + if (!is_compat_task()) + goto send_sig; + + for (i = 0; i < ARM_MAX_BRP; ++i) { + if (current->thread.debug.hbp_break[i] == bp) { + info.si_errno = (i << 1) + 1; + break; + } + } + for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) { + if (current->thread.debug.hbp_watch[i] == bp) { + info.si_errno = -((i << 1) + 1); + break; + } + } + +send_sig: +#endif + force_sig_info(SIGTRAP, &info, current); +} + +/* + * Unregister breakpoints from this task and reset the pointers in + * the thread_struct. + */ +void flush_ptrace_hw_breakpoint(struct task_struct *tsk) +{ + int i; + struct thread_struct *t = &tsk->thread; + + for (i = 0; i < ARM_MAX_BRP; i++) { + if (t->debug.hbp_break[i]) { + unregister_hw_breakpoint(t->debug.hbp_break[i]); + t->debug.hbp_break[i] = NULL; + } + } + + for (i = 0; i < ARM_MAX_WRP; i++) { + if (t->debug.hbp_watch[i]) { + unregister_hw_breakpoint(t->debug.hbp_watch[i]); + t->debug.hbp_watch[i] = NULL; + } + } +} + +void ptrace_hw_copy_thread(struct task_struct *tsk) +{ + memset(&tsk->thread.debug, 0, sizeof(struct debug_info)); +} + +static struct perf_event *ptrace_hbp_get_event(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx) +{ + struct perf_event *bp = ERR_PTR(-EINVAL); + + switch (note_type) { + case NT_ARM_HW_BREAK: + if (idx < ARM_MAX_BRP) + bp = tsk->thread.debug.hbp_break[idx]; + break; + case NT_ARM_HW_WATCH: + if (idx < ARM_MAX_WRP) + bp = tsk->thread.debug.hbp_watch[idx]; + break; + } + + return bp; +} + +static int ptrace_hbp_set_event(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + struct perf_event *bp) +{ + int err = -EINVAL; + + switch (note_type) { + case NT_ARM_HW_BREAK: + if (idx < ARM_MAX_BRP) { + tsk->thread.debug.hbp_break[idx] = bp; + err = 0; + } + break; + case NT_ARM_HW_WATCH: + if (idx < ARM_MAX_WRP) { + tsk->thread.debug.hbp_watch[idx] = bp; + err = 0; + } + break; + } + + return err; +} + +static struct perf_event *ptrace_hbp_create(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx) +{ + struct perf_event *bp; + struct perf_event_attr attr; + int err, type; + + switch (note_type) { + case NT_ARM_HW_BREAK: + type = HW_BREAKPOINT_X; + break; + case NT_ARM_HW_WATCH: + type = HW_BREAKPOINT_RW; + break; + default: + return ERR_PTR(-EINVAL); + } + + ptrace_breakpoint_init(&attr); + + /* + * Initialise fields to sane defaults + * (i.e. values that will pass validation). 
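+	 * The breakpoint is created disabled so that it only takes effect
+	 * once userspace installs a valid control value.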
+ */ + attr.bp_addr = 0; + attr.bp_len = HW_BREAKPOINT_LEN_4; + attr.bp_type = type; + attr.disabled = 1; + + bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk); + if (IS_ERR(bp)) + return bp; + + err = ptrace_hbp_set_event(note_type, tsk, idx, bp); + if (err) + return ERR_PTR(err); + + return bp; +} + +static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type, + struct arch_hw_breakpoint_ctrl ctrl, + struct perf_event_attr *attr) +{ + int err, len, type; + + err = arch_bp_generic_fields(ctrl, &len, &type); + if (err) + return err; + + switch (note_type) { + case NT_ARM_HW_BREAK: + if ((type & HW_BREAKPOINT_X) != type) + return -EINVAL; + break; + case NT_ARM_HW_WATCH: + if ((type & HW_BREAKPOINT_RW) != type) + return -EINVAL; + break; + default: + return -EINVAL; + } + + attr->bp_len = len; + attr->bp_type = type; + attr->disabled = !ctrl.enabled; + + return 0; +} + +static int ptrace_hbp_get_resource_info(unsigned int note_type, u32 *info) +{ + u8 num; + u32 reg = 0; + + switch (note_type) { + case NT_ARM_HW_BREAK: + num = hw_breakpoint_slots(TYPE_INST); + break; + case NT_ARM_HW_WATCH: + num = hw_breakpoint_slots(TYPE_DATA); + break; + default: + return -EINVAL; + } + + reg |= debug_monitors_arch(); + reg <<= 8; + reg |= num; + + *info = reg; + return 0; +} + +static int ptrace_hbp_get_ctrl(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + u32 *ctrl) +{ + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); + + if (IS_ERR(bp)) + return PTR_ERR(bp); + + *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0; + return 0; +} + +static int ptrace_hbp_get_addr(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + u64 *addr) +{ + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); + + if (IS_ERR(bp)) + return PTR_ERR(bp); + + *addr = bp ? 
bp->attr.bp_addr : 0; + return 0; +} + +static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx) +{ + struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx); + + if (!bp) + bp = ptrace_hbp_create(note_type, tsk, idx); + + return bp; +} + +static int ptrace_hbp_set_ctrl(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + u32 uctrl) +{ + int err; + struct perf_event *bp; + struct perf_event_attr attr; + struct arch_hw_breakpoint_ctrl ctrl; + + bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); + if (IS_ERR(bp)) { + err = PTR_ERR(bp); + return err; + } + + attr = bp->attr; + decode_ctrl_reg(uctrl, &ctrl); + err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr); + if (err) + return err; + + return modify_user_hw_breakpoint(bp, &attr); +} + +static int ptrace_hbp_set_addr(unsigned int note_type, + struct task_struct *tsk, + unsigned long idx, + u64 addr) +{ + int err; + struct perf_event *bp; + struct perf_event_attr attr; + + bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx); + if (IS_ERR(bp)) { + err = PTR_ERR(bp); + return err; + } + + attr = bp->attr; + attr.bp_addr = addr; + err = modify_user_hw_breakpoint(bp, &attr); + return err; +} + +#define PTRACE_HBP_ADDR_SZ sizeof(u64) +#define PTRACE_HBP_CTRL_SZ sizeof(u32) +#define PTRACE_HBP_REG_OFF sizeof(u32) + +static int hw_break_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + unsigned int note_type = regset->core_note_type; + int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit; + u32 info, ctrl; + u64 addr; + + /* Resource info */ + ret = ptrace_hbp_get_resource_info(note_type, &info); + if (ret) + return ret; + + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &info, 0, 4); + if (ret) + return ret; + + /* (address, ctrl) registers */ + limit = regset->n * regset->size; + while (count && offset < limit) { + ret = ptrace_hbp_get_addr(note_type, target, idx, &addr); + if (ret) + return ret; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &addr, + offset, offset + PTRACE_HBP_ADDR_SZ); + if (ret) + return ret; + offset += PTRACE_HBP_ADDR_SZ; + + ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl); + if (ret) + return ret; + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &ctrl, + offset, offset + PTRACE_HBP_CTRL_SZ); + if (ret) + return ret; + offset += PTRACE_HBP_CTRL_SZ; + idx++; + } + + return 0; +} + +static int hw_break_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + unsigned int note_type = regset->core_note_type; + int ret, idx = 0, offset = PTRACE_HBP_REG_OFF, limit; + u32 ctrl; + u64 addr; + + /* Resource info */ + ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, 4); + if (ret) + return ret; + + /* (address, ctrl) registers */ + limit = regset->n * regset->size; + while (count && offset < limit) { + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr, + offset, offset + PTRACE_HBP_ADDR_SZ); + if (ret) + return ret; + ret = ptrace_hbp_set_addr(note_type, target, idx, addr); + if (ret) + return ret; + offset += PTRACE_HBP_ADDR_SZ; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl, + offset, offset + PTRACE_HBP_CTRL_SZ); + if (ret) + return ret; + ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl); + if (ret) + return ret; + offset += PTRACE_HBP_CTRL_SZ; + 
idx++; + } + + return 0; +} +#endif /* CONFIG_HAVE_HW_BREAKPOINT */ + +static int gpr_get(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct user_pt_regs *uregs = &task_pt_regs(target)->user_regs; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1); +} + +static int gpr_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_pt_regs newregs; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1); + if (ret) + return ret; + + if (!valid_user_regs(&newregs)) + return -EINVAL; + + task_pt_regs(target)->user_regs = newregs; + return 0; +} + +/* + * TODO: update fp accessors for lazy context switching (sync/flush hwstate) + */ +static int fpr_get(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + struct user_fpsimd_state *uregs; + uregs = &target->thread.fpsimd_state.user_fpsimd; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, -1); +} + +static int fpr_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + struct user_fpsimd_state newstate; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1); + if (ret) + return ret; + + target->thread.fpsimd_state.user_fpsimd = newstate; + return ret; +} + +static int tls_get(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + void *kbuf, void __user *ubuf) +{ + unsigned long *tls = &target->thread.tp_value; + return user_regset_copyout(&pos, &count, &kbuf, &ubuf, tls, 0, -1); +} + +static int tls_set(struct task_struct *target, const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + int ret; + unsigned long tls; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1); + if (ret) + return ret; + + target->thread.tp_value = tls; + return ret; +} + +enum aarch64_regset { + REGSET_GPR, + REGSET_FPR, + REGSET_TLS, +#ifdef CONFIG_HAVE_HW_BREAKPOINT + REGSET_HW_BREAK, + REGSET_HW_WATCH, +#endif +}; + +static const struct user_regset aarch64_regsets[] = { + [REGSET_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = sizeof(struct user_pt_regs) / sizeof(u64), + .size = sizeof(u64), + .align = sizeof(u64), + .get = gpr_get, + .set = gpr_set + }, + [REGSET_FPR] = { + .core_note_type = NT_PRFPREG, + .n = sizeof(struct user_fpsimd_state) / sizeof(u32), + /* + * We pretend we have 32-bit registers because the fpsr and + * fpcr are 32-bits wide. 
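+		 * (each 128-bit V-register is therefore exported as four
+		 * consecutive 32-bit words)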
+		 */
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = fpr_get,
+		.set = fpr_set
+	},
+	[REGSET_TLS] = {
+		.core_note_type = NT_ARM_TLS,
+		.n = 1,
+		.size = sizeof(void *),
+		.align = sizeof(void *),
+		.get = tls_get,
+		.set = tls_set,
+	},
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+	[REGSET_HW_BREAK] = {
+		.core_note_type = NT_ARM_HW_BREAK,
+		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = hw_break_get,
+		.set = hw_break_set,
+	},
+	[REGSET_HW_WATCH] = {
+		.core_note_type = NT_ARM_HW_WATCH,
+		.n = sizeof(struct user_hwdebug_state) / sizeof(u32),
+		.size = sizeof(u32),
+		.align = sizeof(u32),
+		.get = hw_break_get,
+		.set = hw_break_set,
+	},
+#endif
+};
+
+static const struct user_regset_view user_aarch64_view = {
+	.name = "aarch64", .e_machine = EM_AARCH64,
+	.regsets = aarch64_regsets, .n = ARRAY_SIZE(aarch64_regsets)
+};
+
+#ifdef CONFIG_COMPAT
+#include <linux/compat.h>
+
+enum compat_regset {
+	REGSET_COMPAT_GPR,
+	REGSET_COMPAT_VFP,
+};
+
+static int compat_gpr_get(struct task_struct *target,
+			  const struct user_regset *regset,
+			  unsigned int pos, unsigned int count,
+			  void *kbuf, void __user *ubuf)
+{
+	int ret = 0;
+	unsigned int i, start, num_regs;
+
+	/* Calculate the number of AArch32 registers contained in count */
+	num_regs = count / regset->size;
+
+	/* Convert pos into a register number */
+	start = pos / regset->size;
+
+	if (start + num_regs > regset->n)
+		return -EIO;
+
+	for (i = 0; i < num_regs; ++i) {
+		unsigned int idx = start + i;
+		void *reg;
+
+		/* r15 is the PC; slots 16 and 17 hold pstate and orig_x0 */
+		switch (idx) {
+		case 15:
+			reg = (void *)&task_pt_regs(target)->pc;
+			break;
+		case 16:
+			reg = (void *)&task_pt_regs(target)->pstate;
+			break;
+		case 17:
+			reg = (void *)&task_pt_regs(target)->orig_x0;
+			break;
+		default:
+			reg = (void *)&task_pt_regs(target)->regs[idx];
+		}
+
+		ret = copy_to_user(ubuf, reg, sizeof(compat_ulong_t));
+
+		if (ret)
+			break;
+		else
+			ubuf += sizeof(compat_ulong_t);
+	}
+
+	return ret;
+}
+
+static int compat_gpr_set(struct task_struct *target,
+			  const struct user_regset *regset,
+			  unsigned int pos, unsigned int count,
+			  const void *kbuf, const void __user *ubuf)
+{
+	struct pt_regs newregs;
+	int ret = 0;
+	unsigned int i, start, num_regs;
+
+	/* Calculate the number of AArch32 registers contained in count */
+	num_regs = count / regset->size;
+
+	/* Convert pos into a register number */
+	start = pos / regset->size;
+
+	if (start + num_regs > regset->n)
+		return -EIO;
+
+	newregs = *task_pt_regs(target);
+
+	for (i = 0; i < num_regs; ++i) {
+		unsigned int idx = start + i;
+		void *reg;
+
+		switch (idx) {
+		case 15:
+			reg = (void *)&newregs.pc;
+			break;
+		case 16:
+			reg = (void *)&newregs.pstate;
+			break;
+		case 17:
+			reg = (void *)&newregs.orig_x0;
+			break;
+		default:
+			reg = (void *)&newregs.regs[idx];
+		}
+
+		ret = copy_from_user(reg, ubuf, sizeof(compat_ulong_t));
+
+		if (ret)
+			goto out;
+		else
+			ubuf += sizeof(compat_ulong_t);
+	}
+
+	if (valid_user_regs(&newregs.user_regs))
+		*task_pt_regs(target) = newregs;
+	else
+		ret = -EINVAL;
+
+out:
+	return ret;
+}
+
+static int compat_vfp_get(struct task_struct *target,
+			  const struct user_regset *regset,
+			  unsigned int pos, unsigned int count,
+			  void *kbuf, void __user *ubuf)
+{
+	struct user_fpsimd_state *uregs;
+	compat_ulong_t fpscr;
+	int ret;
+
+	uregs = &target->thread.fpsimd_state.user_fpsimd;
+
+	/*
+	 * The VFP registers are packed into the fpsimd_state, so they all sit
+	 * nicely together for us. We just need to create the fpscr separately.
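+	 * AArch32 folds the fpsr status bits and the fpcr control bits into
+	 * the single fpscr register, which is what we synthesise here.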
+ */ + ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, uregs, 0, + VFP_STATE_SIZE - sizeof(compat_ulong_t)); + + if (count && !ret) { + fpscr = (uregs->fpsr & VFP_FPSCR_STAT_MASK) | + (uregs->fpcr & VFP_FPSCR_CTRL_MASK); + ret = put_user(fpscr, (compat_ulong_t *)ubuf); + } + + return ret; +} + +static int compat_vfp_set(struct task_struct *target, + const struct user_regset *regset, + unsigned int pos, unsigned int count, + const void *kbuf, const void __user *ubuf) +{ + struct user_fpsimd_state *uregs; + compat_ulong_t fpscr; + int ret; + + if (pos + count > VFP_STATE_SIZE) + return -EIO; + + uregs = &target->thread.fpsimd_state.user_fpsimd; + + ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, + VFP_STATE_SIZE - sizeof(compat_ulong_t)); + + if (count && !ret) { + ret = get_user(fpscr, (compat_ulong_t *)ubuf); + uregs->fpsr = fpscr & VFP_FPSCR_STAT_MASK; + uregs->fpcr = fpscr & VFP_FPSCR_CTRL_MASK; + } + + return ret; +} + +static const struct user_regset aarch32_regsets[] = { + [REGSET_COMPAT_GPR] = { + .core_note_type = NT_PRSTATUS, + .n = COMPAT_ELF_NGREG, + .size = sizeof(compat_elf_greg_t), + .align = sizeof(compat_elf_greg_t), + .get = compat_gpr_get, + .set = compat_gpr_set + }, + [REGSET_COMPAT_VFP] = { + .core_note_type = NT_ARM_VFP, + .n = VFP_STATE_SIZE / sizeof(compat_ulong_t), + .size = sizeof(compat_ulong_t), + .align = sizeof(compat_ulong_t), + .get = compat_vfp_get, + .set = compat_vfp_set + }, +}; + +static const struct user_regset_view user_aarch32_view = { + .name = "aarch32", .e_machine = EM_ARM, + .regsets = aarch32_regsets, .n = ARRAY_SIZE(aarch32_regsets) +}; + +int aarch32_break_trap(struct pt_regs *regs) +{ + unsigned int instr; + bool bp = false; + void __user *pc = (void __user *)instruction_pointer(regs); + + if (compat_thumb_mode(regs)) { + /* get 16-bit Thumb instruction */ + get_user(instr, (u16 __user *)pc); + if (instr == AARCH32_BREAK_THUMB2_LO) { + /* get second half of 32-bit Thumb-2 instruction */ + get_user(instr, (u16 __user *)(pc + 2)); + bp = instr == AARCH32_BREAK_THUMB2_HI; + } else { + bp = instr == AARCH32_BREAK_THUMB; + } + } else { + /* 32-bit ARM instruction */ + get_user(instr, (u32 __user *)pc); + bp = (instr & ~0xf0000000) == AARCH32_BREAK_ARM; + } + + if (bp) + return ptrace_break(regs); + return 1; +} + +static int compat_ptrace_read_user(struct task_struct *tsk, compat_ulong_t off, + compat_ulong_t __user *ret) +{ + compat_ulong_t tmp; + + if (off & 3) + return -EIO; + + if (off == PT_TEXT_ADDR) + tmp = tsk->mm->start_code; + else if (off == PT_DATA_ADDR) + tmp = tsk->mm->start_data; + else if (off == PT_TEXT_END_ADDR) + tmp = tsk->mm->end_code; + else if (off < sizeof(compat_elf_gregset_t)) + return copy_regset_to_user(tsk, &user_aarch32_view, + REGSET_COMPAT_GPR, off, + sizeof(compat_ulong_t), ret); + else if (off >= COMPAT_USER_SZ) + return -EIO; + else + tmp = 0; + + return put_user(tmp, ret); +} + +static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off, + compat_ulong_t val) +{ + int ret; + + if (off & 3 || off >= COMPAT_USER_SZ) + return -EIO; + + if (off >= sizeof(compat_elf_gregset_t)) + return 0; + + ret = copy_regset_from_user(tsk, &user_aarch32_view, + REGSET_COMPAT_GPR, off, + sizeof(compat_ulong_t), + &val); + return ret; +} + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + +/* + * Convert a virtual register number into an index for a thread_info + * breakpoint array. Breakpoints are identified using positive numbers + * whilst watchpoints are negative. 
The registers are laid out as pairs
+ * of (address, control), each pair mapping to a unique hw_breakpoint struct.
+ * Register 0 is reserved for describing resource information.
+ * For example, num = 1 and num = 2 are the address and control registers
+ * of breakpoint 0, while num = -1 and num = -2 refer to watchpoint 0.
+ */
+static int compat_ptrace_hbp_num_to_idx(compat_long_t num)
+{
+	return (abs(num) - 1) >> 1;
+}
+
+static int compat_ptrace_hbp_get_resource_info(u32 *kdata)
+{
+	u8 num_brps, num_wrps, debug_arch, wp_len;
+	u32 reg = 0;
+
+	num_brps = hw_breakpoint_slots(TYPE_INST);
+	num_wrps = hw_breakpoint_slots(TYPE_DATA);
+
+	debug_arch = debug_monitors_arch();
+	wp_len = 8;
+	reg |= debug_arch;
+	reg <<= 8;
+	reg |= wp_len;
+	reg <<= 8;
+	reg |= num_wrps;
+	reg <<= 8;
+	reg |= num_brps;
+
+	*kdata = reg;
+	return 0;
+}
+
+static int compat_ptrace_hbp_get(unsigned int note_type,
+				 struct task_struct *tsk,
+				 compat_long_t num,
+				 u32 *kdata)
+{
+	u64 addr = 0;
+	u32 ctrl = 0;
+
+	int err, idx = compat_ptrace_hbp_num_to_idx(num);
+
+	if (num & 1) {
+		err = ptrace_hbp_get_addr(note_type, tsk, idx, &addr);
+		*kdata = (u32)addr;
+	} else {
+		err = ptrace_hbp_get_ctrl(note_type, tsk, idx, &ctrl);
+		*kdata = ctrl;
+	}
+
+	return err;
+}
+
+static int compat_ptrace_hbp_set(unsigned int note_type,
+				 struct task_struct *tsk,
+				 compat_long_t num,
+				 u32 *kdata)
+{
+	u64 addr;
+	u32 ctrl;
+
+	int err, idx = compat_ptrace_hbp_num_to_idx(num);
+
+	if (num & 1) {
+		addr = *kdata;
+		err = ptrace_hbp_set_addr(note_type, tsk, idx, addr);
+	} else {
+		ctrl = *kdata;
+		err = ptrace_hbp_set_ctrl(note_type, tsk, idx, ctrl);
+	}
+
+	return err;
+}
+
+static int compat_ptrace_gethbpregs(struct task_struct *tsk, compat_long_t num,
+				    compat_ulong_t __user *data)
+{
+	int ret;
+	u32 kdata;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	/* Watchpoint */
+	if (num < 0) {
+		ret = compat_ptrace_hbp_get(NT_ARM_HW_WATCH, tsk, num, &kdata);
+	/* Resource info */
+	} else if (num == 0) {
+		ret = compat_ptrace_hbp_get_resource_info(&kdata);
+	/* Breakpoint */
+	} else {
+		ret = compat_ptrace_hbp_get(NT_ARM_HW_BREAK, tsk, num, &kdata);
+	}
+	set_fs(old_fs);
+
+	if (!ret)
+		ret = put_user(kdata, data);
+
+	return ret;
+}
+
+static int compat_ptrace_sethbpregs(struct task_struct *tsk, compat_long_t num,
+				    compat_ulong_t __user *data)
+{
+	int ret;
+	u32 kdata = 0;
+	mm_segment_t old_fs = get_fs();
+
+	if (num == 0)
+		return 0;
+
+	ret = get_user(kdata, data);
+	if (ret)
+		return ret;
+
+	set_fs(KERNEL_DS);
+	if (num < 0)
+		ret = compat_ptrace_hbp_set(NT_ARM_HW_WATCH, tsk, num, &kdata);
+	else
+		ret = compat_ptrace_hbp_set(NT_ARM_HW_BREAK, tsk, num, &kdata);
+	set_fs(old_fs);
+
+	return ret;
+}
+#endif	/* CONFIG_HAVE_HW_BREAKPOINT */
+
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+			compat_ulong_t caddr, compat_ulong_t cdata)
+{
+	unsigned long addr = caddr;
+	unsigned long data = cdata;
+	void __user *datap = compat_ptr(data);
+	int ret;
+
+	switch (request) {
+	case PTRACE_PEEKUSR:
+		ret = compat_ptrace_read_user(child, addr, datap);
+		break;
+
+	case PTRACE_POKEUSR:
+		ret = compat_ptrace_write_user(child, addr, data);
+		break;
+
+	case COMPAT_PTRACE_GETREGS:
+		ret = copy_regset_to_user(child,
+					  &user_aarch32_view,
+					  REGSET_COMPAT_GPR,
+					  0, sizeof(compat_elf_gregset_t),
+					  datap);
+		break;
+
+	case COMPAT_PTRACE_SETREGS:
+		ret = copy_regset_from_user(child,
+					    &user_aarch32_view,
+					    REGSET_COMPAT_GPR,
+					    0, sizeof(compat_elf_gregset_t),
+					    datap);
+		break;
+
+	case COMPAT_PTRACE_GET_THREAD_AREA:
+		ret = put_user((compat_ulong_t)child->thread.tp_value,
+			       (compat_ulong_t __user *)datap);
+		break;
+
+	case
COMPAT_PTRACE_SET_SYSCALL: + task_pt_regs(child)->syscallno = data; + ret = 0; + break; + + case COMPAT_PTRACE_GETVFPREGS: + ret = copy_regset_to_user(child, + &user_aarch32_view, + REGSET_COMPAT_VFP, + 0, VFP_STATE_SIZE, + datap); + break; + + case COMPAT_PTRACE_SETVFPREGS: + ret = copy_regset_from_user(child, + &user_aarch32_view, + REGSET_COMPAT_VFP, + 0, VFP_STATE_SIZE, + datap); + break; + +#ifdef CONFIG_HAVE_HW_BREAKPOINT + case COMPAT_PTRACE_GETHBPREGS: + ret = compat_ptrace_gethbpregs(child, addr, datap); + break; + + case COMPAT_PTRACE_SETHBPREGS: + ret = compat_ptrace_sethbpregs(child, addr, datap); + break; +#endif + + default: + ret = compat_ptrace_request(child, request, addr, + data); + break; + } + + return ret; +} +#endif /* CONFIG_COMPAT */ + +const struct user_regset_view *task_user_regset_view(struct task_struct *task) +{ +#ifdef CONFIG_COMPAT + if (is_compat_thread(task_thread_info(task))) + return &user_aarch32_view; +#endif + return &user_aarch64_view; +} + +long arch_ptrace(struct task_struct *child, long request, + unsigned long addr, unsigned long data) +{ + return ptrace_request(child, request, addr, data); +} + + +static int __init ptrace_break_init(void) +{ + hook_debug_fault_code(DBG_ESR_EVT_BRK, arm64_break_trap, SIGTRAP, + TRAP_BRKPT, "ptrace BRK handler"); + return 0; +} +core_initcall(ptrace_break_init); + + +asmlinkage int syscall_trace(int dir, struct pt_regs *regs) +{ + unsigned long saved_reg; + + if (!test_thread_flag(TIF_SYSCALL_TRACE)) + return regs->syscallno; + + if (is_compat_task()) { + /* AArch32 uses ip (r12) for scratch */ + saved_reg = regs->regs[12]; + regs->regs[12] = dir; + } else { + /* + * Save X7. X7 is used to denote syscall entry/exit: + * X7 = 0 -> entry, = 1 -> exit + */ + saved_reg = regs->regs[7]; + regs->regs[7] = dir; + } + + if (dir) + tracehook_report_syscall_exit(regs, 0); + else if (tracehook_report_syscall_entry(regs)) + regs->syscallno = ~0UL; + + if (is_compat_task()) + regs->regs[12] = saved_reg; + else + regs->regs[7] = saved_reg; + + return regs->syscallno; +} diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c new file mode 100644 index 000000000000..48ffb9fb3fe3 --- /dev/null +++ b/arch/arm64/kernel/setup.c @@ -0,0 +1,347 @@ +/* + * Based on arch/arm/kernel/setup.c + * + * Copyright (C) 1995-2001 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/stddef.h> +#include <linux/ioport.h> +#include <linux/delay.h> +#include <linux/utsname.h> +#include <linux/initrd.h> +#include <linux/console.h> +#include <linux/bootmem.h> +#include <linux/seq_file.h> +#include <linux/screen_info.h> +#include <linux/init.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> +#include <linux/root_dev.h> +#include <linux/cpu.h> +#include <linux/interrupt.h> +#include <linux/smp.h> +#include <linux/fs.h> +#include <linux/proc_fs.h> +#include <linux/memblock.h> +#include <linux/of_fdt.h> + +#include <asm/cputype.h> +#include <asm/elf.h> +#include <asm/cputable.h> +#include <asm/sections.h> +#include <asm/setup.h> +#include <asm/cacheflush.h> +#include <asm/tlbflush.h> +#include <asm/traps.h> +#include <asm/memblock.h> + +unsigned int processor_id; +EXPORT_SYMBOL(processor_id); + +unsigned int elf_hwcap __read_mostly; +EXPORT_SYMBOL_GPL(elf_hwcap); + +static const char *cpu_name; +static const char *machine_name; +phys_addr_t __fdt_pointer __initdata; + +/* + * Standard memory resources + */ +static struct resource mem_res[] = { + { + .name = "Kernel code", + .start = 0, + .end = 0, + .flags = IORESOURCE_MEM + }, + { + .name = "Kernel data", + .start = 0, + .end = 0, + .flags = IORESOURCE_MEM + } +}; + +#define kernel_code mem_res[0] +#define kernel_data mem_res[1] + +void __init early_print(const char *str, ...) +{ + char buf[256]; + va_list ap; + + va_start(ap, str); + vsnprintf(buf, sizeof(buf), str, ap); + va_end(ap); + + printk("%s", buf); +} + +static void __init setup_processor(void) +{ + struct cpu_info *cpu_info; + + /* + * locate processor in the list of supported processor + * types. The linker builds this table for us from the + * entries in arch/arm/mm/proc.S + */ + cpu_info = lookup_processor_type(read_cpuid_id()); + if (!cpu_info) { + printk("CPU configuration botched (ID %08x), unable to continue.\n", + read_cpuid_id()); + while (1); + } + + cpu_name = cpu_info->cpu_name; + + printk("CPU: %s [%08x] revision %d\n", + cpu_name, read_cpuid_id(), read_cpuid_id() & 15); + + sprintf(init_utsname()->machine, "aarch64"); + elf_hwcap = 0; +} + +static void __init setup_machine_fdt(phys_addr_t dt_phys) +{ + struct boot_param_header *devtree; + unsigned long dt_root; + + /* Check we have a non-NULL DT pointer */ + if (!dt_phys) { + early_print("\n" + "Error: NULL or invalid device tree blob\n" + "The dtb must be 8-byte aligned and passed in the first 512MB of memory\n" + "\nPlease check your bootloader.\n"); + + while (true) + cpu_relax(); + + } + + devtree = phys_to_virt(dt_phys); + + /* Check device tree validity */ + if (be32_to_cpu(devtree->magic) != OF_DT_HEADER) { + early_print("\n" + "Error: invalid device tree blob at physical address 0x%p (virtual address 0x%p)\n" + "Expected 0x%x, found 0x%x\n" + "\nPlease check your bootloader.\n", + dt_phys, devtree, OF_DT_HEADER, + be32_to_cpu(devtree->magic)); + + while (true) + cpu_relax(); + } + + initial_boot_params = devtree; + dt_root = of_get_flat_dt_root(); + + machine_name = of_get_flat_dt_prop(dt_root, "model", NULL); + if (!machine_name) + machine_name = of_get_flat_dt_prop(dt_root, "compatible", NULL); + if (!machine_name) + machine_name = "<unknown>"; + pr_info("Machine: %s\n", machine_name); + + /* Retrieve various information from the /chosen node */ + of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line); + /* Initialize {size,address}-cells info */ + of_scan_flat_dt(early_init_dt_scan_root, NULL); + 
/* Setup memory, calling early_init_dt_add_memory_arch */ + of_scan_flat_dt(early_init_dt_scan_memory, NULL); +} + +void __init early_init_dt_add_memory_arch(u64 base, u64 size) +{ + size &= PAGE_MASK; + memblock_add(base, size); +} + +void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) +{ + return __va(memblock_alloc(size, align)); +} + +/* + * Limit the memory size that was specified via FDT. + */ +static int __init early_mem(char *p) +{ + phys_addr_t limit; + + if (!p) + return 1; + + limit = memparse(p, &p) & PAGE_MASK; + pr_notice("Memory limited to %lldMB\n", limit >> 20); + + memblock_enforce_memory_limit(limit); + + return 0; +} +early_param("mem", early_mem); + +static void __init request_standard_resources(void) +{ + struct memblock_region *region; + struct resource *res; + + kernel_code.start = virt_to_phys(_text); + kernel_code.end = virt_to_phys(_etext - 1); + kernel_data.start = virt_to_phys(_sdata); + kernel_data.end = virt_to_phys(_end - 1); + + for_each_memblock(memory, region) { + res = alloc_bootmem_low(sizeof(*res)); + res->name = "System RAM"; + res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region)); + res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1; + res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; + + request_resource(&iomem_resource, res); + + if (kernel_code.start >= res->start && + kernel_code.end <= res->end) + request_resource(res, &kernel_code); + if (kernel_data.start >= res->start && + kernel_data.end <= res->end) + request_resource(res, &kernel_data); + } +} + +void __init setup_arch(char **cmdline_p) +{ + setup_processor(); + + setup_machine_fdt(__fdt_pointer); + + init_mm.start_code = (unsigned long) _text; + init_mm.end_code = (unsigned long) _etext; + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = (unsigned long) _end; + + *cmdline_p = boot_command_line; + + parse_early_param(); + + arm64_memblock_init(); + + paging_init(); + request_standard_resources(); + + unflatten_device_tree(); + +#ifdef CONFIG_SMP + smp_init_cpus(); +#endif + +#ifdef CONFIG_VT +#if defined(CONFIG_VGA_CONSOLE) + conswitchp = &vga_con; +#elif defined(CONFIG_DUMMY_CONSOLE) + conswitchp = &dummy_con; +#endif +#endif +} + +static DEFINE_PER_CPU(struct cpu, cpu_data); + +static int __init topology_init(void) +{ + int i; + + for_each_possible_cpu(i) { + struct cpu *cpu = &per_cpu(cpu_data, i); + cpu->hotpluggable = 1; + register_cpu(cpu, i); + } + + return 0; +} +subsys_initcall(topology_init); + +static const char *hwcap_str[] = { + "fp", + "asimd", + NULL +}; + +static int c_show(struct seq_file *m, void *v) +{ + int i; + + seq_printf(m, "Processor\t: %s rev %d (%s)\n", + cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); + + for_each_online_cpu(i) { + /* + * glibc reads /proc/cpuinfo to determine the number of + * online processors, looking for lines beginning with + * "processor". Give glibc what it expects. 
+ */ +#ifdef CONFIG_SMP + seq_printf(m, "processor\t: %d\n", i); +#endif + seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n", + loops_per_jiffy / (500000UL/HZ), + loops_per_jiffy / (5000UL/HZ) % 100); + } + + /* dump out the processor features */ + seq_puts(m, "Features\t: "); + + for (i = 0; hwcap_str[i]; i++) + if (elf_hwcap & (1 << i)) + seq_printf(m, "%s ", hwcap_str[i]); + + seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); + seq_printf(m, "CPU architecture: AArch64\n"); + seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); + seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); + seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); + + seq_puts(m, "\n"); + + seq_printf(m, "Hardware\t: %s\n", machine_name); + + return 0; +} + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + return *pos < 1 ? (void *)1 : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return NULL; +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start = c_start, + .next = c_next, + .stop = c_stop, + .show = c_show +}; diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c new file mode 100644 index 000000000000..8807ba2cf262 --- /dev/null +++ b/arch/arm64/kernel/signal.c @@ -0,0 +1,437 @@ +/* + * Based on arch/arm/kernel/signal.c + * + * Copyright (C) 1995-2009 Russell King + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/errno.h> +#include <linux/signal.h> +#include <linux/personality.h> +#include <linux/freezer.h> +#include <linux/uaccess.h> +#include <linux/tracehook.h> +#include <linux/ratelimit.h> + +#include <asm/compat.h> +#include <asm/debug-monitors.h> +#include <asm/elf.h> +#include <asm/cacheflush.h> +#include <asm/ucontext.h> +#include <asm/unistd.h> +#include <asm/fpsimd.h> +#include <asm/signal32.h> +#include <asm/vdso.h> + +/* + * Do a signal return; undo the signal stack. These are aligned to 128-bit. + */ +struct rt_sigframe { + struct siginfo info; + struct ucontext uc; +}; + +static int preserve_fpsimd_context(struct fpsimd_context __user *ctx) +{ + struct fpsimd_state *fpsimd = ¤t->thread.fpsimd_state; + int err; + + /* dump the hardware registers to the fpsimd_state structure */ + fpsimd_save_state(fpsimd); + + /* copy the FP and status/control registers */ + err = __copy_to_user(ctx->vregs, fpsimd->vregs, sizeof(fpsimd->vregs)); + __put_user_error(fpsimd->fpsr, &ctx->fpsr, err); + __put_user_error(fpsimd->fpcr, &ctx->fpcr, err); + + /* copy the magic/size information */ + __put_user_error(FPSIMD_MAGIC, &ctx->head.magic, err); + __put_user_error(sizeof(struct fpsimd_context), &ctx->head.size, err); + + return err ? 
-EFAULT : 0;
+}
+
+static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
+{
+ struct fpsimd_state fpsimd;
+ __u32 magic, size;
+ int err = 0;
+
+ /* check the magic/size information */
+ __get_user_error(magic, &ctx->head.magic, err);
+ __get_user_error(size, &ctx->head.size, err);
+ if (err)
+ return -EFAULT;
+ if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
+ return -EINVAL;
+
+ /* copy the FP and status/control registers */
+ err = __copy_from_user(fpsimd.vregs, ctx->vregs,
+ sizeof(fpsimd.vregs));
+ __get_user_error(fpsimd.fpsr, &ctx->fpsr, err);
+ __get_user_error(fpsimd.fpcr, &ctx->fpcr, err);
+
+ /* load the hardware registers from the fpsimd_state structure */
+ if (!err) {
+ preempt_disable();
+ fpsimd_load_state(&fpsimd);
+ preempt_enable();
+ }
+
+ return err ? -EFAULT : 0;
+}
+
+static int restore_sigframe(struct pt_regs *regs,
+ struct rt_sigframe __user *sf)
+{
+ sigset_t set;
+ int i, err;
+ struct aux_context __user *aux =
+ (struct aux_context __user *)sf->uc.uc_mcontext.__reserved;
+
+ err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
+ if (err == 0)
+ set_current_blocked(&set);
+
+ for (i = 0; i < 31; i++)
+ __get_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i],
+ err);
+ __get_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err);
+ __get_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err);
+ __get_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err);
+
+ /*
+ * Avoid sys_rt_sigreturn() restarting.
+ */
+ regs->syscallno = ~0UL;
+
+ err |= !valid_user_regs(&regs->user_regs);
+
+ if (err == 0)
+ err |= restore_fpsimd_context(&aux->fpsimd);
+
+ return err;
+}
+
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 128-bit boundary, 'sp' should
+ * still be 128-bit aligned here.
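+ * (Hence the 'regs->sp & 15' test below: any frame that was not laid
+ * out by setup_rt_frame(), or that has been corrupted since, fails the
+ * check and is treated as a bad frame.)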
+ */ + if (regs->sp & 15) + goto badframe; + + frame = (struct rt_sigframe __user *)regs->sp; + + if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) + goto badframe; + + if (restore_sigframe(regs, frame)) + goto badframe; + + if (do_sigaltstack(&frame->uc.uc_stack, + NULL, regs->sp) == -EFAULT) + goto badframe; + + return regs->regs[0]; + +badframe: + if (show_unhandled_signals) + pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", + current->comm, task_pid_nr(current), __func__, + regs->pc, regs->sp); + force_sig(SIGSEGV, current); + return 0; +} + +asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, + unsigned long sp) +{ + return do_sigaltstack(uss, uoss, sp); +} + +static int setup_sigframe(struct rt_sigframe __user *sf, + struct pt_regs *regs, sigset_t *set) +{ + int i, err = 0; + struct aux_context __user *aux = + (struct aux_context __user *)sf->uc.uc_mcontext.__reserved; + + for (i = 0; i < 31; i++) + __put_user_error(regs->regs[i], &sf->uc.uc_mcontext.regs[i], + err); + __put_user_error(regs->sp, &sf->uc.uc_mcontext.sp, err); + __put_user_error(regs->pc, &sf->uc.uc_mcontext.pc, err); + __put_user_error(regs->pstate, &sf->uc.uc_mcontext.pstate, err); + + __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); + + err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set)); + + if (err == 0) + err |= preserve_fpsimd_context(&aux->fpsimd); + + /* set the "end" magic */ + __put_user_error(0, &aux->end.magic, err); + __put_user_error(0, &aux->end.size, err); + + return err; +} + +static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, + int framesize) +{ + unsigned long sp, sp_top; + void __user *frame; + + sp = sp_top = regs->sp; + + /* + * This is the X/Open sanctioned signal stack switching. + */ + if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) + sp = sp_top = current->sas_ss_sp + current->sas_ss_size; + + /* room for stack frame (FP, LR) */ + sp -= 16; + + sp = (sp - framesize) & ~15; + frame = (void __user *)sp; + + /* + * Check that we can actually write to the signal frame. 
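+ * The range checked runs from the rounded-down frame base up to the
+ * original sp, so it covers both the frame itself and the 16 bytes
+ * reserved above it for the FP/LR record.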
+ */
+ if (!access_ok(VERIFY_WRITE, frame, sp_top - sp))
+ frame = NULL;
+
+ return frame;
+}
+
+static int setup_return(struct pt_regs *regs, struct k_sigaction *ka,
+ void __user *frame, int usig)
+{
+ int err = 0;
+ __sigrestore_t sigtramp;
+ unsigned long __user *sp = (unsigned long __user *)regs->sp;
+
+ /* set up the stack frame */
+ __put_user_error(regs->regs[29], sp - 2, err);
+ __put_user_error(regs->regs[30], sp - 1, err);
+
+ regs->regs[0] = usig;
+ regs->regs[29] = regs->sp - 16;
+ regs->sp = (unsigned long)frame;
+ regs->pc = (unsigned long)ka->sa.sa_handler;
+
+ if (ka->sa.sa_flags & SA_RESTORER)
+ sigtramp = ka->sa.sa_restorer;
+ else
+ sigtramp = VDSO_SYMBOL(current->mm->context.vdso, sigtramp);
+
+ regs->regs[30] = (unsigned long)sigtramp;
+
+ return err;
+}
+
+static int setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
+ sigset_t *set, struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ stack_t stack;
+ int err = 0;
+
+ frame = get_sigframe(ka, regs, sizeof(*frame));
+ if (!frame)
+ return 1;
+
+ __put_user_error(0, &frame->uc.uc_flags, err);
+ __put_user_error(NULL, &frame->uc.uc_link, err);
+
+ memset(&stack, 0, sizeof(stack));
+ stack.ss_sp = (void __user *)current->sas_ss_sp;
+ stack.ss_flags = sas_ss_flags(regs->sp);
+ stack.ss_size = current->sas_ss_size;
+ err |= __copy_to_user(&frame->uc.uc_stack, &stack, sizeof(stack));
+
+ err |= setup_sigframe(frame, regs, set);
+ if (err == 0)
+ err = setup_return(regs, ka, frame, usig);
+
+ if (err == 0 && ka->sa.sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, info);
+ regs->regs[1] = (unsigned long)&frame->info;
+ regs->regs[2] = (unsigned long)&frame->uc;
+ }
+
+ return err;
+}
+
+static void setup_restart_syscall(struct pt_regs *regs)
+{
+ if (is_compat_task())
+ compat_setup_restart_syscall(regs);
+ else
+ regs->regs[8] = __NR_restart_syscall;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void handle_signal(unsigned long sig, struct k_sigaction *ka,
+ siginfo_t *info, struct pt_regs *regs)
+{
+ struct thread_info *thread = current_thread_info();
+ struct task_struct *tsk = current;
+ sigset_t *oldset = sigmask_to_save();
+ int usig = sig;
+ int ret;
+
+ /*
+ * translate the signal
+ */
+ if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
+ usig = thread->exec_domain->signal_invmap[usig];
+
+ /*
+ * Set up the stack frame
+ */
+ if (is_compat_task()) {
+ if (ka->sa.sa_flags & SA_SIGINFO)
+ ret = compat_setup_rt_frame(usig, ka, info, oldset,
+ regs);
+ else
+ ret = compat_setup_frame(usig, ka, oldset, regs);
+ } else {
+ ret = setup_rt_frame(usig, ka, info, oldset, regs);
+ }
+
+ /*
+ * Check that the resulting registers are actually sane.
+ */
+ ret |= !valid_user_regs(&regs->user_regs);
+
+ if (ret != 0) {
+ force_sigsegv(sig, tsk);
+ return;
+ }
+
+ /*
+ * Fast forward the stepping logic so we step into the signal
+ * handler.
+ */
+ user_fastforward_single_step(tsk);
+
+ signal_delivered(sig, info, ka, regs, 0);
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL, even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
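+ *
+ * A worked example of the restart handling in do_signal() below: a
+ * read() that was sleeping when the signal arrived returns -ERESTARTSYS.
+ * If the handler was installed with SA_RESTART, the PC is left rewound
+ * at the svc instruction so the read() is reissued on return; otherwise
+ * x0 is rewritten to -EINTR and execution resumes after the syscall.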
+ */ +static void do_signal(struct pt_regs *regs) +{ + unsigned long continue_addr = 0, restart_addr = 0; + struct k_sigaction ka; + siginfo_t info; + int signr, retval = 0; + int syscall = (int)regs->syscallno; + + /* + * If we were from a system call, check for system call restarting... + */ + if (syscall >= 0) { + continue_addr = regs->pc; + restart_addr = continue_addr - (compat_thumb_mode(regs) ? 2 : 4); + retval = regs->regs[0]; + + /* + * Avoid additional syscall restarting via ret_to_user. + */ + regs->syscallno = ~0UL; + + /* + * Prepare for system call restart. We do this here so that a + * debugger will see the already changed PC. + */ + switch (retval) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTART_RESTARTBLOCK: + regs->regs[0] = regs->orig_x0; + regs->pc = restart_addr; + break; + } + } + + /* + * Get the signal to deliver. When running under ptrace, at this point + * the debugger may change all of our registers. + */ + signr = get_signal_to_deliver(&info, &ka, regs, NULL); + if (signr > 0) { + /* + * Depending on the signal settings, we may need to revert the + * decision to restart the system call, but skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->pc == restart_addr && + (retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK || + (retval == -ERESTARTSYS && + !(ka.sa.sa_flags & SA_RESTART)))) { + regs->regs[0] = -EINTR; + regs->pc = continue_addr; + } + + handle_signal(signr, &ka, &info, regs); + return; + } + + /* + * Handle restarting a different system call. As above, if a debugger + * has chosen to restart at a different PC, ignore the restart. + */ + if (syscall >= 0 && regs->pc == restart_addr) { + if (retval == -ERESTART_RESTARTBLOCK) + setup_restart_syscall(regs); + user_rewind_single_step(current); + } + + restore_saved_sigmask(); +} + +asmlinkage void do_notify_resume(struct pt_regs *regs, + unsigned int thread_flags) +{ + if (thread_flags & _TIF_SIGPENDING) + do_signal(regs); + + if (thread_flags & _TIF_NOTIFY_RESUME) { + clear_thread_flag(TIF_NOTIFY_RESUME); + tracehook_notify_resume(regs); + } +} diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c new file mode 100644 index 000000000000..ac74c2f261e3 --- /dev/null +++ b/arch/arm64/kernel/signal32.c @@ -0,0 +1,876 @@ +/* + * Based on arch/arm/kernel/signal.c + * + * Copyright (C) 1995-2009 Russell King + * Copyright (C) 2012 ARM Ltd. + * Modified by Will Deacon <will.deacon@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define __SYSCALL_COMPAT + +#include <linux/compat.h> +#include <linux/signal.h> +#include <linux/syscalls.h> +#include <linux/ratelimit.h> + +#include <asm/fpsimd.h> +#include <asm/signal32.h> +#include <asm/uaccess.h> +#include <asm/unistd.h> + +typedef struct compat_siginfo { + int si_signo; + int si_errno; + int si_code; + + union { + /* The padding is the same size as AArch64. 
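+ * (An int is 32 bits on both ABIs, so the native SI_PAD_SIZE yields a
+ * _pad array of the same size here as in the native siginfo.)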
*/ + int _pad[SI_PAD_SIZE]; + + /* kill() */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + } _kill; + + /* POSIX.1b timers */ + struct { + compat_timer_t _tid; /* timer id */ + int _overrun; /* overrun count */ + compat_sigval_t _sigval; /* same as below */ + int _sys_private; /* not to be passed to user */ + } _timer; + + /* POSIX.1b signals */ + struct { + compat_pid_t _pid; /* sender's pid */ + __compat_uid32_t _uid; /* sender's uid */ + compat_sigval_t _sigval; + } _rt; + + /* SIGCHLD */ + struct { + compat_pid_t _pid; /* which child */ + __compat_uid32_t _uid; /* sender's uid */ + int _status; /* exit code */ + compat_clock_t _utime; + compat_clock_t _stime; + } _sigchld; + + /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */ + struct { + compat_uptr_t _addr; /* faulting insn/memory ref. */ + short _addr_lsb; /* LSB of the reported address */ + } _sigfault; + + /* SIGPOLL */ + struct { + compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ + int _fd; + } _sigpoll; + } _sifields; +} compat_siginfo_t; + +struct compat_sigaction { + compat_uptr_t sa_handler; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; + compat_sigset_t sa_mask; +}; + +struct compat_old_sigaction { + compat_uptr_t sa_handler; + compat_old_sigset_t sa_mask; + compat_ulong_t sa_flags; + compat_uptr_t sa_restorer; +}; + +typedef struct compat_sigaltstack { + compat_uptr_t ss_sp; + int ss_flags; + compat_size_t ss_size; +} compat_stack_t; + +struct compat_sigcontext { + /* We always set these two fields to 0 */ + compat_ulong_t trap_no; + compat_ulong_t error_code; + + compat_ulong_t oldmask; + compat_ulong_t arm_r0; + compat_ulong_t arm_r1; + compat_ulong_t arm_r2; + compat_ulong_t arm_r3; + compat_ulong_t arm_r4; + compat_ulong_t arm_r5; + compat_ulong_t arm_r6; + compat_ulong_t arm_r7; + compat_ulong_t arm_r8; + compat_ulong_t arm_r9; + compat_ulong_t arm_r10; + compat_ulong_t arm_fp; + compat_ulong_t arm_ip; + compat_ulong_t arm_sp; + compat_ulong_t arm_lr; + compat_ulong_t arm_pc; + compat_ulong_t arm_cpsr; + compat_ulong_t fault_address; +}; + +struct compat_ucontext { + compat_ulong_t uc_flags; + struct compat_ucontext *uc_link; + compat_stack_t uc_stack; + struct compat_sigcontext uc_mcontext; + compat_sigset_t uc_sigmask; + int __unused[32 - (sizeof (compat_sigset_t) / sizeof (int))]; + compat_ulong_t uc_regspace[128] __attribute__((__aligned__(8))); +}; + +struct compat_vfp_sigframe { + compat_ulong_t magic; + compat_ulong_t size; + struct compat_user_vfp { + compat_u64 fpregs[32]; + compat_ulong_t fpscr; + } ufp; + struct compat_user_vfp_exc { + compat_ulong_t fpexc; + compat_ulong_t fpinst; + compat_ulong_t fpinst2; + } ufp_exc; +} __attribute__((__aligned__(8))); + +#define VFP_MAGIC 0x56465001 +#define VFP_STORAGE_SIZE sizeof(struct compat_vfp_sigframe) + +struct compat_aux_sigframe { + struct compat_vfp_sigframe vfp; + + /* Something that isn't a valid magic number for any coprocessor. */ + unsigned long end_magic; +} __attribute__((__aligned__(8))); + +struct compat_sigframe { + struct compat_ucontext uc; + compat_ulong_t retcode[2]; +}; + +struct compat_rt_sigframe { + struct compat_siginfo info; + struct compat_sigframe sig; +}; + +#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) + +/* + * For ARM syscalls, the syscall number has to be loaded into r7. + * We do not support an OABI userspace. 
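+ *
+ * As a concrete example (assuming the ARM EABI numbering, where
+ * __NR_sigreturn is 119): MOV_R7_NR_SIGRETURN below works out to
+ * 0xe3a07077, i.e. "mov r7, #119", and SVC_SYS_SIGRETURN to 0xef000077,
+ * i.e. "svc 0x000077". An EABI kernel ignores the SVC immediate and
+ * takes the number from r7, so the immediate is purely cosmetic.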
+ */
+#define MOV_R7_NR_SIGRETURN (0xe3a07000 | __NR_sigreturn)
+#define SVC_SYS_SIGRETURN (0xef000000 | __NR_sigreturn)
+#define MOV_R7_NR_RT_SIGRETURN (0xe3a07000 | __NR_rt_sigreturn)
+#define SVC_SYS_RT_SIGRETURN (0xef000000 | __NR_rt_sigreturn)
+
+/*
+ * For Thumb syscalls, we also pass the syscall number via r7. We therefore
+ * need two 16-bit instructions.
+ */
+#define SVC_THUMB_SIGRETURN (((0xdf00 | __NR_sigreturn) << 16) | \
+ 0x2700 | __NR_sigreturn)
+#define SVC_THUMB_RT_SIGRETURN (((0xdf00 | __NR_rt_sigreturn) << 16) | \
+ 0x2700 | __NR_rt_sigreturn)
+
+const compat_ulong_t aarch32_sigret_code[6] = {
+ /*
+ * AArch32 sigreturn code.
+ * We don't construct an OABI SWI - instead we just set the imm24 field
+ * to the EABI syscall number so that we create a sane disassembly.
+ */
+ MOV_R7_NR_SIGRETURN, SVC_SYS_SIGRETURN, SVC_THUMB_SIGRETURN,
+ MOV_R7_NR_RT_SIGRETURN, SVC_SYS_RT_SIGRETURN, SVC_THUMB_RT_SIGRETURN,
+};
+
+static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
+{
+ compat_sigset_t cset;
+
+ cset.sig[0] = set->sig[0] & 0xffffffffull;
+ cset.sig[1] = set->sig[0] >> 32;
+
+ return copy_to_user(uset, &cset, sizeof(*uset));
+}
+
+static inline int get_sigset_t(sigset_t *set,
+ const compat_sigset_t __user *uset)
+{
+ compat_sigset_t s32;
+
+ if (copy_from_user(&s32, uset, sizeof(*uset)))
+ return -EFAULT;
+
+ set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
+ return 0;
+}
+
+int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
+{
+ int err;
+
+ if (!access_ok(VERIFY_WRITE, to, sizeof(*to)))
+ return -EFAULT;
+
+ /* If you change the siginfo_t structure, please be sure
+ * this code is fixed accordingly.
+ * It should never copy any pad contained in the structure
+ * to avoid security leaks, but must copy the generic
+ * 3 ints plus the relevant union member.
+ * This routine must convert siginfo from 64-bit to 32-bit
+ * at the same time.
+ */
+ err = __put_user(from->si_signo, &to->si_signo);
+ err |= __put_user(from->si_errno, &to->si_errno);
+ err |= __put_user((short)from->si_code, &to->si_code);
+ if (from->si_code < 0)
+ err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad,
+ SI_PAD_SIZE);
+ else switch (from->si_code & __SI_MASK) {
+ case __SI_KILL:
+ err |= __put_user(from->si_pid, &to->si_pid);
+ err |= __put_user(from->si_uid, &to->si_uid);
+ break;
+ case __SI_TIMER:
+ err |= __put_user(from->si_tid, &to->si_tid);
+ err |= __put_user(from->si_overrun, &to->si_overrun);
+ err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr,
+ &to->si_ptr);
+ break;
+ case __SI_POLL:
+ err |= __put_user(from->si_band, &to->si_band);
+ err |= __put_user(from->si_fd, &to->si_fd);
+ break;
+ case __SI_FAULT:
+ err |= __put_user((compat_uptr_t)(unsigned long)from->si_addr,
+ &to->si_addr);
+#ifdef BUS_MCEERR_AO
+ /*
+ * Other callers might not initialize the si_lsb field,
+ * so check explicitly for the right codes here.
+ */
+ if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
+ err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
+#endif
+ break;
+ case __SI_CHLD:
+ err |= __put_user(from->si_pid, &to->si_pid);
+ err |= __put_user(from->si_uid, &to->si_uid);
+ err |= __put_user(from->si_status, &to->si_status);
+ err |= __put_user(from->si_utime, &to->si_utime);
+ err |= __put_user(from->si_stime, &to->si_stime);
+ break;
+ case __SI_RT: /* This is not generated by the kernel as of now.
*/
+ case __SI_MESGQ: /* But this is */
+ err |= __put_user(from->si_pid, &to->si_pid);
+ err |= __put_user(from->si_uid, &to->si_uid);
+ err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr);
+ break;
+ default: /* this is just in case for now ... */
+ err |= __put_user(from->si_pid, &to->si_pid);
+ err |= __put_user(from->si_uid, &to->si_uid);
+ break;
+ }
+ return err;
+}
+
+int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+{
+ memset(to, 0, sizeof *to);
+
+ if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
+ copy_from_user(to->_sifields._pad,
+ from->_sifields._pad, SI_PAD_SIZE))
+ return -EFAULT;
+
+ return 0;
+}
+
+/*
+ * VFP save/restore code.
+ */
+static int compat_preserve_vfp_context(struct compat_vfp_sigframe __user *frame)
+{
+ struct fpsimd_state *fpsimd = &current->thread.fpsimd_state;
+ compat_ulong_t magic = VFP_MAGIC;
+ compat_ulong_t size = VFP_STORAGE_SIZE;
+ compat_ulong_t fpscr, fpexc;
+ int err = 0;
+
+ /*
+ * Save the hardware registers to the fpsimd_state structure.
+ * Note that this also saves V16-31, which aren't visible
+ * in AArch32.
+ */
+ fpsimd_save_state(fpsimd);
+
+ /* Place structure header on the stack */
+ __put_user_error(magic, &frame->magic, err);
+ __put_user_error(size, &frame->size, err);
+
+ /*
+ * Now copy the FP registers. Since the registers are packed,
+ * we can copy the prefix we want (V0-V15) as it is.
+ * FIXME: Won't work if big endian.
+ */
+ err |= __copy_to_user(&frame->ufp.fpregs, fpsimd->vregs,
+ sizeof(frame->ufp.fpregs));
+
+ /* Create an AArch32 fpscr from the fpsr and the fpcr. */
+ fpscr = (fpsimd->fpsr & VFP_FPSCR_STAT_MASK) |
+ (fpsimd->fpcr & VFP_FPSCR_CTRL_MASK);
+ __put_user_error(fpscr, &frame->ufp.fpscr, err);
+
+ /*
+ * The exception registers aren't available, so we fake up a
+ * basic FPEXC and zero everything else.
+ */
+ fpexc = (1 << 30);
+ __put_user_error(fpexc, &frame->ufp_exc.fpexc, err);
+ __put_user_error(0, &frame->ufp_exc.fpinst, err);
+ __put_user_error(0, &frame->ufp_exc.fpinst2, err);
+
+ return err ? -EFAULT : 0;
+}
+
+static int compat_restore_vfp_context(struct compat_vfp_sigframe __user *frame)
+{
+ struct fpsimd_state fpsimd;
+ compat_ulong_t magic = VFP_MAGIC;
+ compat_ulong_t size = VFP_STORAGE_SIZE;
+ compat_ulong_t fpscr;
+ int err = 0;
+
+ __get_user_error(magic, &frame->magic, err);
+ __get_user_error(size, &frame->size, err);
+
+ if (err)
+ return -EFAULT;
+ if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
+ return -EINVAL;
+
+ /*
+ * Copy the FP registers into the start of the fpsimd_state.
+ * FIXME: Won't work if big endian.
+ */
+ err |= __copy_from_user(fpsimd.vregs, frame->ufp.fpregs,
+ sizeof(frame->ufp.fpregs));
+
+ /* Extract the fpsr and the fpcr from the fpscr */
+ __get_user_error(fpscr, &frame->ufp.fpscr, err);
+ fpsimd.fpsr = fpscr & VFP_FPSCR_STAT_MASK;
+ fpsimd.fpcr = fpscr & VFP_FPSCR_CTRL_MASK;
+
+ /*
+ * We don't need to touch the exception register, so
+ * reload the hardware state.
+ */
+ if (!err) {
+ preempt_disable();
+ fpsimd_load_state(&fpsimd);
+ preempt_enable();
+ }
+
+ return err ? -EFAULT : 0;
+}
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
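+ * The caller's original mask is saved and re-installed once the handler
+ * has run; the mask passed in is only in force while we wait
+ * (sigsuspend() arranges both steps).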
+ */
+asmlinkage int compat_sys_sigsuspend(int restart, compat_ulong_t oldmask,
+ compat_old_sigset_t mask)
+{
+ sigset_t blocked;
+
+ /* initialise the local set; sigsuspend() installs it and saves the old one */
+ siginitset(&blocked, mask);
+ return sigsuspend(&blocked);
+}
+
+asmlinkage int compat_sys_sigaction(int sig,
+ const struct compat_old_sigaction __user *act,
+ struct compat_old_sigaction __user *oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+ compat_old_sigset_t mask;
+ compat_uptr_t handler, restorer;
+
+ if (act) {
+ if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
+ __get_user(handler, &act->sa_handler) ||
+ __get_user(restorer, &act->sa_restorer) ||
+ __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+ __get_user(mask, &act->sa_mask))
+ return -EFAULT;
+
+ new_ka.sa.sa_handler = compat_ptr(handler);
+ new_ka.sa.sa_restorer = compat_ptr(restorer);
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
+ __put_user(ptr_to_compat(old_ka.sa.sa_handler),
+ &oact->sa_handler) ||
+ __put_user(ptr_to_compat(old_ka.sa.sa_restorer),
+ &oact->sa_restorer) ||
+ __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+ __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+asmlinkage int compat_sys_rt_sigaction(int sig,
+ const struct compat_sigaction __user *act,
+ struct compat_sigaction __user *oact,
+ compat_size_t sigsetsize)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+
+ /* XXX: Don't preclude handling different sized sigset_t's. */
+ if (sigsetsize != sizeof(compat_sigset_t))
+ return -EINVAL;
+
+ if (act) {
+ compat_uptr_t handler, restorer;
+
+ ret = get_user(handler, &act->sa_handler);
+ new_ka.sa.sa_handler = compat_ptr(handler);
+ ret |= get_user(restorer, &act->sa_restorer);
+ new_ka.sa.sa_restorer = compat_ptr(restorer);
+ ret |= get_sigset_t(&new_ka.sa.sa_mask, &act->sa_mask);
+ ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ if (ret)
+ return -EFAULT;
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+ if (!ret && oact) {
+ ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
+ ret |= put_sigset_t(&oact->sa_mask, &old_ka.sa.sa_mask);
+ ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ }
+ return ret;
+}
+
+int compat_do_sigaltstack(compat_uptr_t compat_uss, compat_uptr_t compat_uoss,
+ compat_ulong_t sp)
+{
+ compat_stack_t __user *newstack = compat_ptr(compat_uss);
+ compat_stack_t __user *oldstack = compat_ptr(compat_uoss);
+ compat_uptr_t ss_sp;
+ int ret;
+ mm_segment_t old_fs;
+ stack_t uss, uoss;
+
+ /* Marshall the compat new stack into a stack_t */
+ if (newstack) {
+ if (get_user(ss_sp, &newstack->ss_sp) ||
+ __get_user(uss.ss_flags, &newstack->ss_flags) ||
+ __get_user(uss.ss_size, &newstack->ss_size))
+ return -EFAULT;
+ uss.ss_sp = compat_ptr(ss_sp);
+ }
+
+ old_fs = get_fs();
+ set_fs(KERNEL_DS);
+ /* The __user pointer casts are valid because of the set_fs() */
+ ret = do_sigaltstack(
+ newstack ? (stack_t __user *) &uss : NULL,
+ oldstack ? (stack_t __user *) &uoss : NULL,
+ (unsigned long)sp);
+ set_fs(old_fs);
+
+ /* Convert the old stack_t into a compat stack.
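+ * Truncating the pointer with ptr_to_compat() is safe here: a compat
+ * task's signal stack was supplied by 32-bit userspace, so its address
+ * already fits in 32 bits.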
*/
+ if (!ret && oldstack &&
+ (put_user(ptr_to_compat(uoss.ss_sp), &oldstack->ss_sp) ||
+ __put_user(uoss.ss_flags, &oldstack->ss_flags) ||
+ __put_user(uoss.ss_size, &oldstack->ss_size)))
+ return -EFAULT;
+ return ret;
+}
+
+static int compat_restore_sigframe(struct pt_regs *regs,
+ struct compat_sigframe __user *sf)
+{
+ int err;
+ sigset_t set;
+ struct compat_aux_sigframe __user *aux;
+
+ err = get_sigset_t(&set, &sf->uc.uc_sigmask);
+ if (err == 0) {
+ sigdelsetmask(&set, ~_BLOCKABLE);
+ set_current_blocked(&set);
+ }
+
+ __get_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err);
+ __get_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err);
+ __get_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err);
+ __get_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err);
+ __get_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err);
+ __get_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err);
+ __get_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err);
+ __get_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err);
+ __get_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err);
+ __get_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err);
+ __get_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err);
+ __get_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err);
+ __get_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err);
+ __get_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err);
+ __get_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err);
+ __get_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err);
+ __get_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err);
+
+ /*
+ * Avoid compat_sys_sigreturn() restarting.
+ */
+ regs->syscallno = ~0UL;
+
+ err |= !valid_user_regs(&regs->user_regs);
+
+ aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace;
+ if (err == 0)
+ err |= compat_restore_vfp_context(&aux->vfp);
+
+ return err;
+}
+
+asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
+{
+ struct compat_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary, 'sp' should
+ * be 64-bit aligned here. If it's not, the user is trying to
+ * mess with us.
+ */
+ if (regs->compat_sp & 7)
+ goto badframe;
+
+ frame = (struct compat_sigframe __user *)regs->compat_sp;
+
+ if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
+ goto badframe;
+
+ if (compat_restore_sigframe(regs, frame))
+ goto badframe;
+
+ return regs->regs[0];
+
+badframe:
+ if (show_unhandled_signals)
+ pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n",
+ current->comm, task_pid_nr(current), __func__,
+ regs->pc, regs->sp);
+ force_sig(SIGSEGV, current);
+ return 0;
+}
+
+asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct compat_rt_sigframe __user *frame;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /*
+ * Since we stacked the signal on a 64-bit boundary, 'sp' should
+ * be 64-bit aligned here. If it's not, the user is trying to
+ * mess with us.
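+ * (AArch32 frames only require 8-byte alignment, hence the
+ * 'compat_sp & 7' test rather than the '& 15' used for native frames.)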
+ */ + if (regs->compat_sp & 7) + goto badframe; + + frame = (struct compat_rt_sigframe __user *)regs->compat_sp; + + if (!access_ok(VERIFY_READ, frame, sizeof (*frame))) + goto badframe; + + if (compat_restore_sigframe(regs, &frame->sig)) + goto badframe; + + if (compat_do_sigaltstack(ptr_to_compat(&frame->sig.uc.uc_stack), + ptr_to_compat((void __user *)NULL), + regs->compat_sp) == -EFAULT) + goto badframe; + + return regs->regs[0]; + +badframe: + if (show_unhandled_signals) + pr_info_ratelimited("%s[%d]: bad frame in %s: pc=%08llx sp=%08llx\n", + current->comm, task_pid_nr(current), __func__, + regs->pc, regs->sp); + force_sig(SIGSEGV, current); + return 0; +} + +static inline void __user *compat_get_sigframe(struct k_sigaction *ka, + struct pt_regs *regs, + int framesize) +{ + compat_ulong_t sp = regs->compat_sp; + void __user *frame; + + /* + * This is the X/Open sanctioned signal stack switching. + */ + if ((ka->sa.sa_flags & SA_ONSTACK) && !sas_ss_flags(sp)) + sp = current->sas_ss_sp + current->sas_ss_size; + + /* + * ATPCS B01 mandates 8-byte alignment + */ + frame = compat_ptr((compat_uptr_t)((sp - framesize) & ~7)); + + /* + * Check that we can actually write to the signal frame. + */ + if (!access_ok(VERIFY_WRITE, frame, framesize)) + frame = NULL; + + return frame; +} + +static int compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, + compat_ulong_t __user *rc, void __user *frame, + int usig) +{ + compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler); + compat_ulong_t retcode; + compat_ulong_t spsr = regs->pstate & ~PSR_f; + int thumb; + + /* Check if the handler is written for ARM or Thumb */ + thumb = handler & 1; + + if (thumb) { + spsr |= COMPAT_PSR_T_BIT; + spsr &= ~COMPAT_PSR_IT_MASK; + } else { + spsr &= ~COMPAT_PSR_T_BIT; + } + + if (ka->sa.sa_flags & SA_RESTORER) { + retcode = ptr_to_compat(ka->sa.sa_restorer); + } else { + /* Set up sigreturn pointer */ + unsigned int idx = thumb << 1; + + if (ka->sa.sa_flags & SA_SIGINFO) + idx += 3; + + retcode = AARCH32_VECTORS_BASE + + AARCH32_KERN_SIGRET_CODE_OFFSET + + (idx << 2) + thumb; + } + + regs->regs[0] = usig; + regs->compat_sp = ptr_to_compat(frame); + regs->compat_lr = retcode; + regs->pc = handler; + regs->pstate = spsr; + + return 0; +} + +static int compat_setup_sigframe(struct compat_sigframe __user *sf, + struct pt_regs *regs, sigset_t *set) +{ + struct compat_aux_sigframe __user *aux; + int err = 0; + + __put_user_error(regs->regs[0], &sf->uc.uc_mcontext.arm_r0, err); + __put_user_error(regs->regs[1], &sf->uc.uc_mcontext.arm_r1, err); + __put_user_error(regs->regs[2], &sf->uc.uc_mcontext.arm_r2, err); + __put_user_error(regs->regs[3], &sf->uc.uc_mcontext.arm_r3, err); + __put_user_error(regs->regs[4], &sf->uc.uc_mcontext.arm_r4, err); + __put_user_error(regs->regs[5], &sf->uc.uc_mcontext.arm_r5, err); + __put_user_error(regs->regs[6], &sf->uc.uc_mcontext.arm_r6, err); + __put_user_error(regs->regs[7], &sf->uc.uc_mcontext.arm_r7, err); + __put_user_error(regs->regs[8], &sf->uc.uc_mcontext.arm_r8, err); + __put_user_error(regs->regs[9], &sf->uc.uc_mcontext.arm_r9, err); + __put_user_error(regs->regs[10], &sf->uc.uc_mcontext.arm_r10, err); + __put_user_error(regs->regs[11], &sf->uc.uc_mcontext.arm_fp, err); + __put_user_error(regs->regs[12], &sf->uc.uc_mcontext.arm_ip, err); + __put_user_error(regs->compat_sp, &sf->uc.uc_mcontext.arm_sp, err); + __put_user_error(regs->compat_lr, &sf->uc.uc_mcontext.arm_lr, err); + __put_user_error(regs->pc, &sf->uc.uc_mcontext.arm_pc, err); + 
__put_user_error(regs->pstate, &sf->uc.uc_mcontext.arm_cpsr, err); + + __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err); + __put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.error_code, err); + __put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err); + __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err); + + err |= put_sigset_t(&sf->uc.uc_sigmask, set); + + aux = (struct compat_aux_sigframe __user *) sf->uc.uc_regspace; + + if (err == 0) + err |= compat_preserve_vfp_context(&aux->vfp); + __put_user_error(0, &aux->end_magic, err); + + return err; +} + +/* + * 32-bit signal handling routines called from signal.c + */ +int compat_setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info, + sigset_t *set, struct pt_regs *regs) +{ + struct compat_rt_sigframe __user *frame; + compat_stack_t stack; + int err = 0; + + frame = compat_get_sigframe(ka, regs, sizeof(*frame)); + + if (!frame) + return 1; + + err |= copy_siginfo_to_user32(&frame->info, info); + + __put_user_error(0, &frame->sig.uc.uc_flags, err); + __put_user_error(NULL, &frame->sig.uc.uc_link, err); + + memset(&stack, 0, sizeof(stack)); + stack.ss_sp = (compat_uptr_t)current->sas_ss_sp; + stack.ss_flags = sas_ss_flags(regs->compat_sp); + stack.ss_size = current->sas_ss_size; + err |= __copy_to_user(&frame->sig.uc.uc_stack, &stack, sizeof(stack)); + + err |= compat_setup_sigframe(&frame->sig, regs, set); + if (err == 0) + err = compat_setup_return(regs, ka, frame->sig.retcode, frame, + usig); + + if (err == 0) { + regs->regs[1] = (compat_ulong_t)(unsigned long)&frame->info; + regs->regs[2] = (compat_ulong_t)(unsigned long)&frame->sig.uc; + } + + return err; +} + +int compat_setup_frame(int usig, struct k_sigaction *ka, sigset_t *set, + struct pt_regs *regs) +{ + struct compat_sigframe __user *frame; + int err = 0; + + frame = compat_get_sigframe(ka, regs, sizeof(*frame)); + + if (!frame) + return 1; + + __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err); + + err |= compat_setup_sigframe(frame, regs, set); + if (err == 0) + err = compat_setup_return(regs, ka, frame->retcode, frame, usig); + + return err; +} + +/* + * RT signals don't have generic compat wrappers. + * See arch/powerpc/kernel/signal_32.c + */ +asmlinkage int compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, + compat_sigset_t __user *oset, + compat_size_t sigsetsize) +{ + sigset_t s; + sigset_t __user *up; + int ret; + mm_segment_t old_fs = get_fs(); + + if (set) { + if (get_sigset_t(&s, set)) + return -EFAULT; + } + + set_fs(KERNEL_DS); + /* This is valid because of the set_fs() */ + up = (sigset_t __user *) &s; + ret = sys_rt_sigprocmask(how, set ? up : NULL, oset ? 
up : NULL,
+ sigsetsize);
+ set_fs(old_fs);
+ if (ret)
+ return ret;
+ if (oset) {
+ if (put_sigset_t(oset, &s))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+asmlinkage int compat_sys_rt_sigpending(compat_sigset_t __user *set,
+ compat_size_t sigsetsize)
+{
+ sigset_t s;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ /* The __user pointer cast is valid because of the set_fs() */
+ ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
+ set_fs(old_fs);
+ if (!ret) {
+ if (put_sigset_t(set, &s))
+ return -EFAULT;
+ }
+ return ret;
+}
+
+asmlinkage int compat_sys_rt_sigqueueinfo(int pid, int sig,
+ compat_siginfo_t __user *uinfo)
+{
+ siginfo_t info;
+ int ret;
+ mm_segment_t old_fs = get_fs();
+
+ ret = copy_siginfo_from_user32(&info, uinfo);
+ if (unlikely(ret))
+ return ret;
+
+ set_fs(KERNEL_DS);
+ /* The __user pointer cast is valid because of the set_fs() */
+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
+ set_fs(old_fs);
+ return ret;
+}
+
+void compat_setup_restart_syscall(struct pt_regs *regs)
+{
+ regs->regs[7] = __NR_restart_syscall;
+} diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c new file mode 100644 index 000000000000..b711525be21f --- /dev/null +++ b/arch/arm64/kernel/smp.c @@ -0,0 +1,469 @@
+/*
+ * SMP initialisation and IPI support
+ * Based on arch/arm/kernel/smp.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/cache.h>
+#include <linux/profile.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/seq_file.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
+#include <linux/completion.h>
+#include <linux/of.h>
+
+#include <asm/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cputype.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/tlbflush.h>
+#include <asm/ptrace.h>
+
+/*
+ * As of 2.5, kernels no longer have an init_tasks structure, so we need
+ * some other way of telling a new secondary core where to place its SVC
+ * stack.
+ */
+struct secondary_data secondary_data;
+volatile unsigned long secondary_holding_pen_release = -1;
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CALL_FUNC_SINGLE,
+ IPI_CPU_STOP,
+};
+
+static DEFINE_RAW_SPINLOCK(boot_lock);
+
+/*
+ * Write secondary_holding_pen_release in a way that is guaranteed to be
+ * visible to all observers, irrespective of whether they're taking part
+ * in coherency or not. This is necessary for the hotplug code to work
+ * reliably.
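+ *
+ * A sketch of the spin-table protocol as used here (the secondary-side
+ * loop lives in head.S): each waiting CPU sits in secondary_holding_pen
+ * executing wfe and comparing secondary_holding_pen_release with its own
+ * CPU number. boot_secondary() writes that number and issues sev(); the
+ * matching CPU leaves the pen and writes -1 back to acknowledge.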
+ */
+static void __cpuinit write_pen_release(int val)
+{
+ void *start = (void *)&secondary_holding_pen_release;
+ unsigned long size = sizeof(secondary_holding_pen_release);
+
+ secondary_holding_pen_release = val;
+ __flush_dcache_area(start, size);
+}
+
+/*
+ * Boot a secondary CPU, and assign it the specified idle task.
+ * This also gives us the initial stack to use for this CPU.
+ */
+static int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
+{
+ unsigned long timeout;
+
+ /*
+ * Set synchronisation state between this boot processor
+ * and the secondary one
+ */
+ raw_spin_lock(&boot_lock);
+
+ /*
+ * Update the pen release flag.
+ */
+ write_pen_release(cpu);
+
+ /*
+ * Send an event, causing the secondaries to read pen_release.
+ */
+ sev();
+
+ timeout = jiffies + (1 * HZ);
+ while (time_before(jiffies, timeout)) {
+ if (secondary_holding_pen_release == -1UL)
+ break;
+ udelay(10);
+ }
+
+ /*
+ * Now the secondary core is starting up, let it run its
+ * calibrations, then wait for it to finish
+ */
+ raw_spin_unlock(&boot_lock);
+
+ return secondary_holding_pen_release != -1 ? -ENOSYS : 0;
+}
+
+static DECLARE_COMPLETION(cpu_running);
+
+int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *idle)
+{
+ int ret;
+
+ /*
+ * We need to tell the secondary core where to find its stack and the
+ * page tables.
+ */
+ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
+ __flush_dcache_area(&secondary_data, sizeof(secondary_data));
+
+ /*
+ * Now bring the CPU into our world.
+ */
+ ret = boot_secondary(cpu, idle);
+ if (ret == 0) {
+ /*
+ * CPU was successfully started, wait for it to come online or
+ * time out.
+ */
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
+
+ if (!cpu_online(cpu)) {
+ pr_crit("CPU%u: failed to come online\n", cpu);
+ ret = -EIO;
+ }
+ } else {
+ pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
+ }
+
+ secondary_data.stack = NULL;
+
+ return ret;
+}
+
+/*
+ * This is the secondary CPU boot entry. We're using this CPU's
+ * idle thread stack, but a set of temporary page tables.
+ */
+asmlinkage void __cpuinit secondary_start_kernel(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int cpu = smp_processor_id();
+
+ printk("CPU%u: Booted secondary processor\n", cpu);
+
+ /*
+ * All kernel threads share the same mm context; grab a
+ * reference and switch to it.
+ */
+ atomic_inc(&mm->mm_count);
+ current->active_mm = mm;
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+
+ /*
+ * TTBR0 is only used for the identity mapping at this stage. Make it
+ * point to zero page to avoid speculatively fetching new entries.
+ */
+ cpu_set_reserved_ttbr0();
+ flush_tlb_all();
+
+ preempt_disable();
+ trace_hardirqs_off();
+
+ /*
+ * Let the primary processor know we're out of the
+ * pen, then head off into the C entry point
+ */
+ write_pen_release(-1);
+
+ /*
+ * Synchronise with the boot thread.
+ */
+ raw_spin_lock(&boot_lock);
+ raw_spin_unlock(&boot_lock);
+
+ /*
+ * Enable local interrupts.
+ */
+ notify_cpu_starting(cpu);
+ local_irq_enable();
+ local_fiq_enable();
+
+ /*
+ * OK, now it's safe to let the boot CPU continue. Wait for
+ * the CPU migration code to notice that the CPU is online
+ * before we continue.
+ */ + set_cpu_online(cpu, true); + while (!cpu_active(cpu)) + cpu_relax(); + + /* + * OK, it's off to the idle thread for us + */ + cpu_idle(); +} + +void __init smp_cpus_done(unsigned int max_cpus) +{ + unsigned long bogosum = loops_per_jiffy * num_online_cpus(); + + pr_info("SMP: Total of %d processors activated (%lu.%02lu BogoMIPS).\n", + num_online_cpus(), bogosum / (500000/HZ), + (bogosum / (5000/HZ)) % 100); +} + +void __init smp_prepare_boot_cpu(void) +{ +} + +static void (*smp_cross_call)(const struct cpumask *, unsigned int); +static phys_addr_t cpu_release_addr[NR_CPUS]; + +/* + * Enumerate the possible CPU set from the device tree. + */ +void __init smp_init_cpus(void) +{ + const char *enable_method; + struct device_node *dn = NULL; + int cpu = 0; + + while ((dn = of_find_node_by_type(dn, "cpu"))) { + if (cpu >= NR_CPUS) + goto next; + + /* + * We currently support only the "spin-table" enable-method. + */ + enable_method = of_get_property(dn, "enable-method", NULL); + if (!enable_method || strcmp(enable_method, "spin-table")) { + pr_err("CPU %d: missing or invalid enable-method property: %s\n", + cpu, enable_method); + goto next; + } + + /* + * Determine the address from which the CPU is polling. + */ + if (of_property_read_u64(dn, "cpu-release-addr", + &cpu_release_addr[cpu])) { + pr_err("CPU %d: missing or invalid cpu-release-addr property\n", + cpu); + goto next; + } + + set_cpu_possible(cpu, true); +next: + cpu++; + } + + /* sanity check */ + if (cpu > NR_CPUS) + pr_warning("no. of cores (%d) greater than configured maximum of %d - clipping\n", + cpu, NR_CPUS); +} + +void __init smp_prepare_cpus(unsigned int max_cpus) +{ + int cpu; + void **release_addr; + unsigned int ncores = num_possible_cpus(); + + /* + * are we trying to boot more cores than exist? + */ + if (max_cpus > ncores) + max_cpus = ncores; + + /* + * Initialise the present map (which describes the set of CPUs + * actually populated at the present time) and release the + * secondaries from the bootloader. + */ + for_each_possible_cpu(cpu) { + if (max_cpus == 0) + break; + + if (!cpu_release_addr[cpu]) + continue; + + release_addr = __va(cpu_release_addr[cpu]); + release_addr[0] = (void *)__pa(secondary_holding_pen); + __flush_dcache_area(release_addr, sizeof(release_addr[0])); + + set_cpu_present(cpu, true); + max_cpus--; + } + + /* + * Send an event to wake up the secondaries. + */ + sev(); +} + + +void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) +{ + smp_cross_call = fn; +} + +void arch_send_call_function_ipi_mask(const struct cpumask *mask) +{ + smp_cross_call(mask, IPI_CALL_FUNC); +} + +void arch_send_call_function_single_ipi(int cpu) +{ + smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); +} + +static const char *ipi_types[NR_IPI] = { +#define S(x,s) [x - IPI_RESCHEDULE] = s + S(IPI_RESCHEDULE, "Rescheduling interrupts"), + S(IPI_CALL_FUNC, "Function call interrupts"), + S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"), + S(IPI_CPU_STOP, "CPU stop interrupts"), +}; + +void show_ipi_list(struct seq_file *p, int prec) +{ + unsigned int cpu, i; + + for (i = 0; i < NR_IPI; i++) { + seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i + IPI_RESCHEDULE, + prec >= 4 ? 
" " : ""); + for_each_present_cpu(cpu) + seq_printf(p, "%10u ", + __get_irq_stat(cpu, ipi_irqs[i])); + seq_printf(p, " %s\n", ipi_types[i]); + } +} + +u64 smp_irq_stat_cpu(unsigned int cpu) +{ + u64 sum = 0; + int i; + + for (i = 0; i < NR_IPI; i++) + sum += __get_irq_stat(cpu, ipi_irqs[i]); + + return sum; +} + +static DEFINE_RAW_SPINLOCK(stop_lock); + +/* + * ipi_cpu_stop - handle IPI from smp_send_stop() + */ +static void ipi_cpu_stop(unsigned int cpu) +{ + if (system_state == SYSTEM_BOOTING || + system_state == SYSTEM_RUNNING) { + raw_spin_lock(&stop_lock); + pr_crit("CPU%u: stopping\n", cpu); + dump_stack(); + raw_spin_unlock(&stop_lock); + } + + set_cpu_online(cpu, false); + + local_fiq_disable(); + local_irq_disable(); + + while (1) + cpu_relax(); +} + +/* + * Main handler for inter-processor interrupts + */ +void handle_IPI(int ipinr, struct pt_regs *regs) +{ + unsigned int cpu = smp_processor_id(); + struct pt_regs *old_regs = set_irq_regs(regs); + + if (ipinr >= IPI_RESCHEDULE && ipinr < IPI_RESCHEDULE + NR_IPI) + __inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_RESCHEDULE]); + + switch (ipinr) { + case IPI_RESCHEDULE: + scheduler_ipi(); + break; + + case IPI_CALL_FUNC: + irq_enter(); + generic_smp_call_function_interrupt(); + irq_exit(); + break; + + case IPI_CALL_FUNC_SINGLE: + irq_enter(); + generic_smp_call_function_single_interrupt(); + irq_exit(); + break; + + case IPI_CPU_STOP: + irq_enter(); + ipi_cpu_stop(cpu); + irq_exit(); + break; + + default: + pr_crit("CPU%u: Unknown IPI message 0x%x\n", cpu, ipinr); + break; + } + set_irq_regs(old_regs); +} + +void smp_send_reschedule(int cpu) +{ + smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE); +} + +void smp_send_stop(void) +{ + unsigned long timeout; + + if (num_online_cpus() > 1) { + cpumask_t mask; + + cpumask_copy(&mask, cpu_online_mask); + cpu_clear(smp_processor_id(), mask); + + smp_cross_call(&mask, IPI_CPU_STOP); + } + + /* Wait up to one second for other CPUs to stop */ + timeout = USEC_PER_SEC; + while (num_online_cpus() > 1 && timeout--) + udelay(1); + + if (num_online_cpus() > 1) + pr_warning("SMP: failed to stop secondary CPUs\n"); +} + +/* + * not supported here + */ +int setup_profiling_timer(unsigned int multiplier) +{ + return -EINVAL; +} diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c new file mode 100644 index 000000000000..d25459ff57fc --- /dev/null +++ b/arch/arm64/kernel/stacktrace.c @@ -0,0 +1,127 @@ +/* + * Stack tracing support + * + * Copyright (C) 2012 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/stacktrace.h> + +#include <asm/stacktrace.h> + +/* + * AArch64 PCS assigns the frame pointer to x29. 
+ * + * A simple function prologue looks like this: + * sub sp, sp, #0x10 + * stp x29, x30, [sp] + * mov x29, sp + * + * A simple function epilogue looks like this: + * mov sp, x29 + * ldp x29, x30, [sp] + * add sp, sp, #0x10 + */ +int unwind_frame(struct stackframe *frame) +{ + unsigned long high, low; + unsigned long fp = frame->fp; + + low = frame->sp; + high = ALIGN(low, THREAD_SIZE); + + if (fp < low || fp > high || fp & 0xf) + return -EINVAL; + + frame->sp = fp + 0x10; + frame->fp = *(unsigned long *)(fp); + frame->pc = *(unsigned long *)(fp + 8); + + return 0; +} + +void notrace walk_stackframe(struct stackframe *frame, + int (*fn)(struct stackframe *, void *), void *data) +{ + while (1) { + int ret; + + if (fn(frame, data)) + break; + ret = unwind_frame(frame); + if (ret < 0) + break; + } +} +EXPORT_SYMBOL(walk_stackframe); + +#ifdef CONFIG_STACKTRACE +struct stack_trace_data { + struct stack_trace *trace; + unsigned int no_sched_functions; + unsigned int skip; +}; + +static int save_trace(struct stackframe *frame, void *d) +{ + struct stack_trace_data *data = d; + struct stack_trace *trace = data->trace; + unsigned long addr = frame->pc; + + if (data->no_sched_functions && in_sched_functions(addr)) + return 0; + if (data->skip) { + data->skip--; + return 0; + } + + trace->entries[trace->nr_entries++] = addr; + + return trace->nr_entries >= trace->max_entries; +} + +void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) +{ + struct stack_trace_data data; + struct stackframe frame; + + data.trace = trace; + data.skip = trace->skip; + + if (tsk != current) { + data.no_sched_functions = 1; + frame.fp = thread_saved_fp(tsk); + frame.sp = thread_saved_sp(tsk); + frame.pc = thread_saved_pc(tsk); + } else { + register unsigned long current_sp asm("sp"); + data.no_sched_functions = 0; + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.sp = current_sp; + frame.pc = (unsigned long)save_stack_trace_tsk; + } + + walk_stackframe(&frame, save_trace, &data); + if (trace->nr_entries < trace->max_entries) + trace->entries[trace->nr_entries++] = ULONG_MAX; +} + +void save_stack_trace(struct stack_trace *trace) +{ + save_stack_trace_tsk(current, trace); +} +EXPORT_SYMBOL_GPL(save_stack_trace); +#endif diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c new file mode 100644 index 000000000000..905fcfb0ddd0 --- /dev/null +++ b/arch/arm64/kernel/sys.c @@ -0,0 +1,138 @@ +/* + * AArch64-specific system calls implementation + * + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/export.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/syscalls.h> + +/* + * Clone a task - this clones the calling program thread. 
+ */
+asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
+ int __user *parent_tidptr, unsigned long tls_val,
+ int __user *child_tidptr, struct pt_regs *regs)
+{
+ if (!newsp)
+ newsp = regs->sp;
+ /* 16-byte aligned stack mandatory on AArch64 */
+ if (newsp & 15)
+ return -EINVAL;
+ return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage long sys_execve(const char __user *filenamei,
+ const char __user *const __user *argv,
+ const char __user *const __user *envp,
+ struct pt_regs *regs)
+{
+ long error;
+ char *filename;
+
+ filename = getname(filenamei);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ return error;
+}
+
+int kernel_execve(const char *filename,
+ const char *const argv[],
+ const char *const envp[])
+{
+ struct pt_regs regs;
+ int ret;
+
+ memset(&regs, 0, sizeof(struct pt_regs));
+ ret = do_execve(filename,
+ (const char __user *const __user *)argv,
+ (const char __user *const __user *)envp, &regs);
+ if (ret < 0)
+ goto out;
+
+ /*
+ * Save argc to the register structure for userspace.
+ */
+ regs.regs[0] = ret;
+
+ /*
+ * We were successful. We won't be returning to our caller, but
+ * instead to user space by manipulating the kernel stack.
+ */
+ asm( "add x0, %0, %1\n\t"
+ "mov x1, %2\n\t"
+ "mov x2, %3\n\t"
+ "bl memmove\n\t" /* copy regs to top of stack */
+ "mov x27, #0\n\t" /* not a syscall */
+ "mov x28, %0\n\t" /* thread structure */
+ "mov sp, x0\n\t" /* reposition stack pointer */
+ "b ret_to_user"
+ :
+ : "r" (current_thread_info()),
+ "Ir" (THREAD_START_SP - sizeof(regs)),
+ "r" (&regs),
+ "Ir" (sizeof(regs))
+ : "x0", "x1", "x2", "x27", "x28", "x30", "memory");
+
+ out:
+ return ret;
+}
+EXPORT_SYMBOL(kernel_execve);
+
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t off)
+{
+ if (offset_in_page(off) != 0)
+ return -EINVAL;
+
+ return sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+}
+
+/*
+ * Wrappers to pass the pt_regs argument.
+ */
+#define sys_execve sys_execve_wrapper
+#define sys_clone sys_clone_wrapper
+#define sys_rt_sigreturn sys_rt_sigreturn_wrapper
+#define sys_sigaltstack sys_sigaltstack_wrapper
+
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, sym) [nr] = sym,
+
+/*
+ * The sys_call_table array must be 4K aligned to be accessible from
+ * kernel/entry.S.
+ */
+void *sys_call_table[__NR_syscalls] __aligned(4096) = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+}; diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/sys32.S new file mode 100644 index 000000000000..5e4dc93cc31f --- /dev/null +++ b/arch/arm64/kernel/sys32.S @@ -0,0 +1,282 @@
+/*
+ * Compat system call wrappers
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Authors: Will Deacon <will.deacon@arm.com>
+ * Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/linkage.h> + +#include <asm/assembler.h> +#include <asm/asm-offsets.h> + +/* + * System call wrappers for the AArch32 compatibility layer. + */ +compat_sys_fork_wrapper: + mov x0, sp + b compat_sys_fork +ENDPROC(compat_sys_fork_wrapper) + +compat_sys_vfork_wrapper: + mov x0, sp + b compat_sys_vfork +ENDPROC(compat_sys_vfork_wrapper) + +compat_sys_execve_wrapper: + mov x3, sp + b compat_sys_execve +ENDPROC(compat_sys_execve_wrapper) + +compat_sys_clone_wrapper: + mov x5, sp + b compat_sys_clone +ENDPROC(compat_sys_clone_wrapper) + +compat_sys_sigreturn_wrapper: + mov x0, sp + mov x27, #0 // prevent syscall restart handling (why) + b compat_sys_sigreturn +ENDPROC(compat_sys_sigreturn_wrapper) + +compat_sys_rt_sigreturn_wrapper: + mov x0, sp + mov x27, #0 // prevent syscall restart handling (why) + b compat_sys_rt_sigreturn +ENDPROC(compat_sys_rt_sigreturn_wrapper) + +compat_sys_sigaltstack_wrapper: + ldr x2, [sp, #S_COMPAT_SP] + b compat_do_sigaltstack +ENDPROC(compat_sys_sigaltstack_wrapper) + +compat_sys_statfs64_wrapper: + mov w3, #84 + cmp w1, #88 + csel w1, w3, w1, eq + b compat_sys_statfs64 +ENDPROC(compat_sys_statfs64_wrapper) + +compat_sys_fstatfs64_wrapper: + mov w3, #84 + cmp w1, #88 + csel w1, w3, w1, eq + b compat_sys_fstatfs64 +ENDPROC(compat_sys_fstatfs64_wrapper) + +/* + * Wrappers for AArch32 syscalls that either take 64-bit parameters + * in registers or that take 32-bit parameters which require sign + * extension. + */ +compat_sys_lseek_wrapper: + sxtw x1, w1 + b sys_lseek +ENDPROC(compat_sys_lseek_wrapper) + +compat_sys_pread64_wrapper: + orr x3, x4, x5, lsl #32 + b sys_pread64 +ENDPROC(compat_sys_pread64_wrapper) + +compat_sys_pwrite64_wrapper: + orr x3, x4, x5, lsl #32 + b sys_pwrite64 +ENDPROC(compat_sys_pwrite64_wrapper) + +compat_sys_truncate64_wrapper: + orr x1, x2, x3, lsl #32 + b sys_truncate +ENDPROC(compat_sys_truncate64_wrapper) + +compat_sys_ftruncate64_wrapper: + orr x1, x2, x3, lsl #32 + b sys_ftruncate +ENDPROC(compat_sys_ftruncate64_wrapper) + +compat_sys_readahead_wrapper: + orr x1, x2, x3, lsl #32 + mov w2, w4 + b sys_readahead +ENDPROC(compat_sys_readahead_wrapper) + +compat_sys_lookup_dcookie: + orr x0, x0, x1, lsl #32 + mov w1, w2 + mov w2, w3 + b sys_lookup_dcookie +ENDPROC(compat_sys_lookup_dcookie) + +compat_sys_fadvise64_64_wrapper: + mov w6, w1 + orr x1, x2, x3, lsl #32 + orr x2, x4, x5, lsl #32 + mov w3, w6 + b sys_fadvise64_64 +ENDPROC(compat_sys_fadvise64_64_wrapper) + +compat_sys_sync_file_range2_wrapper: + orr x2, x2, x3, lsl #32 + orr x3, x4, x5, lsl #32 + b sys_sync_file_range2 +ENDPROC(compat_sys_sync_file_range2_wrapper) + +compat_sys_fallocate_wrapper: + orr x2, x2, x3, lsl #32 + orr x3, x4, x5, lsl #32 + b sys_fallocate +ENDPROC(compat_sys_fallocate_wrapper) + +compat_sys_fanotify_mark_wrapper: + orr x2, x2, x3, lsl #32 + mov w3, w4 + mov w4, w5 + b sys_fanotify_mark +ENDPROC(compat_sys_fanotify_mark_wrapper) + +/* + * Use the compat system call wrappers. 
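+ * Redefining the native sys_* names before <asm/unistd.h> is included
+ * at the bottom of this file makes the __SYSCALL() expansion emit the
+ * compat entry points into compat_sys_call_table, while any name left
+ * untouched falls through to the native 64-bit implementation.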
+
+/*
+ * Use the compat system call wrappers.
+ */
+#define sys_fork		compat_sys_fork_wrapper
+#define sys_open		compat_sys_open
+#define sys_execve		compat_sys_execve_wrapper
+#define sys_lseek		compat_sys_lseek_wrapper
+#define sys_mount		compat_sys_mount
+#define sys_ptrace		compat_sys_ptrace
+#define sys_times		compat_sys_times
+#define sys_ioctl		compat_sys_ioctl
+#define sys_fcntl		compat_sys_fcntl
+#define sys_ustat		compat_sys_ustat
+#define sys_sigaction		compat_sys_sigaction
+#define sys_sigsuspend		compat_sys_sigsuspend
+#define sys_sigpending		compat_sys_sigpending
+#define sys_setrlimit		compat_sys_setrlimit
+#define sys_getrusage		compat_sys_getrusage
+#define sys_gettimeofday	compat_sys_gettimeofday
+#define sys_settimeofday	compat_sys_settimeofday
+#define sys_statfs		compat_sys_statfs
+#define sys_fstatfs		compat_sys_fstatfs
+#define sys_setitimer		compat_sys_setitimer
+#define sys_getitimer		compat_sys_getitimer
+#define sys_newstat		compat_sys_newstat
+#define sys_newlstat		compat_sys_newlstat
+#define sys_newfstat		compat_sys_newfstat
+#define sys_wait4		compat_sys_wait4
+#define sys_sysinfo		compat_sys_sysinfo
+#define sys_sigreturn		compat_sys_sigreturn_wrapper
+#define sys_clone		compat_sys_clone_wrapper
+#define sys_adjtimex		compat_sys_adjtimex
+#define sys_sigprocmask		compat_sys_sigprocmask
+#define sys_getdents		compat_sys_getdents
+#define sys_select		compat_sys_select
+#define sys_readv		compat_sys_readv
+#define sys_writev		compat_sys_writev
+#define sys_sysctl		compat_sys_sysctl
+#define sys_sched_rr_get_interval compat_sys_sched_rr_get_interval
+#define sys_nanosleep		compat_sys_nanosleep
+#define sys_rt_sigreturn	compat_sys_rt_sigreturn_wrapper
+#define sys_rt_sigaction	compat_sys_rt_sigaction
+#define sys_rt_sigprocmask	compat_sys_rt_sigprocmask
+#define sys_rt_sigpending	compat_sys_rt_sigpending
+#define sys_rt_sigtimedwait	compat_sys_rt_sigtimedwait
+#define sys_rt_sigqueueinfo	compat_sys_rt_sigqueueinfo
+#define sys_rt_sigsuspend	compat_sys_rt_sigsuspend
+#define sys_pread64		compat_sys_pread64_wrapper
+#define sys_pwrite64		compat_sys_pwrite64_wrapper
+#define sys_sigaltstack		compat_sys_sigaltstack_wrapper
+#define sys_sendfile		compat_sys_sendfile
+#define sys_vfork		compat_sys_vfork_wrapper
+#define sys_getrlimit		compat_sys_getrlimit
+#define sys_mmap2		sys_mmap_pgoff
+#define sys_truncate64		compat_sys_truncate64_wrapper
+#define sys_ftruncate64		compat_sys_ftruncate64_wrapper
+#define sys_getdents64		compat_sys_getdents64
+#define sys_fcntl64		compat_sys_fcntl64
+#define sys_readahead		compat_sys_readahead_wrapper
+#define sys_futex		compat_sys_futex
+#define sys_sched_setaffinity	compat_sys_sched_setaffinity
+#define sys_sched_getaffinity	compat_sys_sched_getaffinity
+#define sys_io_setup		compat_sys_io_setup
+#define sys_io_getevents	compat_sys_io_getevents
+#define sys_io_submit		compat_sys_io_submit
+#define sys_lookup_dcookie	compat_sys_lookup_dcookie
+#define sys_timer_create	compat_sys_timer_create
+#define sys_timer_settime	compat_sys_timer_settime
+#define sys_timer_gettime	compat_sys_timer_gettime
+#define sys_clock_settime	compat_sys_clock_settime
+#define sys_clock_gettime	compat_sys_clock_gettime
+#define sys_clock_getres	compat_sys_clock_getres
+#define sys_clock_nanosleep	compat_sys_clock_nanosleep
+#define sys_statfs64		compat_sys_statfs64_wrapper
+#define sys_fstatfs64		compat_sys_fstatfs64_wrapper
+#define sys_utimes		compat_sys_utimes
+#define sys_fadvise64_64	compat_sys_fadvise64_64_wrapper
+#define sys_mq_open		compat_sys_mq_open
+#define sys_mq_timedsend	compat_sys_mq_timedsend
+#define sys_mq_timedreceive	compat_sys_mq_timedreceive
+#define sys_mq_notify		compat_sys_mq_notify
+#define sys_mq_getsetattr	compat_sys_mq_getsetattr
+#define sys_waitid		compat_sys_waitid
+#define sys_recv		compat_sys_recv
+#define sys_recvfrom		compat_sys_recvfrom
+#define sys_setsockopt		compat_sys_setsockopt
+#define sys_getsockopt		compat_sys_getsockopt
+#define sys_sendmsg		compat_sys_sendmsg
+#define sys_recvmsg		compat_sys_recvmsg
+#define sys_semctl		compat_sys_semctl
+#define sys_msgsnd		compat_sys_msgsnd
+#define sys_msgrcv		compat_sys_msgrcv
+#define sys_msgctl		compat_sys_msgctl
+#define sys_shmat		compat_sys_shmat
+#define sys_shmctl		compat_sys_shmctl
+#define sys_keyctl		compat_sys_keyctl
+#define sys_semtimedop		compat_sys_semtimedop
+#define sys_mbind		compat_sys_mbind
+#define sys_get_mempolicy	compat_sys_get_mempolicy
+#define sys_set_mempolicy	compat_sys_set_mempolicy
+#define sys_openat		compat_sys_openat
+#define sys_futimesat		compat_sys_futimesat
+#define sys_pselect6		compat_sys_pselect6
+#define sys_ppoll		compat_sys_ppoll
+#define sys_set_robust_list	compat_sys_set_robust_list
+#define sys_get_robust_list	compat_sys_get_robust_list
+#define sys_sync_file_range2	compat_sys_sync_file_range2_wrapper
+#define sys_vmsplice		compat_sys_vmsplice
+#define sys_move_pages		compat_sys_move_pages
+#define sys_epoll_pwait		compat_sys_epoll_pwait
+#define sys_kexec_load		compat_sys_kexec_load
+#define sys_utimensat		compat_sys_utimensat
+#define sys_signalfd		compat_sys_signalfd
+#define sys_fallocate		compat_sys_fallocate_wrapper
+#define sys_timerfd_settime	compat_sys_timerfd_settime
+#define sys_timerfd_gettime	compat_sys_timerfd_gettime
+#define sys_signalfd4		compat_sys_signalfd4
+#define sys_preadv		compat_sys_preadv
+#define sys_pwritev		compat_sys_pwritev
+#define sys_rt_tgsigqueueinfo	compat_sys_rt_tgsigqueueinfo
+#define sys_recvmmsg		compat_sys_recvmmsg
+#define sys_fanotify_mark	compat_sys_fanotify_mark_wrapper
+
+#undef __SYSCALL
+#define __SYSCALL(x, y)		.quad	y	// x
+#define __SYSCALL_COMPAT
+
+/*
+ * The system calls table must be 4KB aligned.
+ */
+	.align	12
+ENTRY(compat_sys_call_table)
+#include <asm/unistd.h>
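Editor's note: both syscall tables are generated from the single list in asm/unistd.h by redefining the __SYSCALL() macro per consumer: sys.c expands it to C designated initializers, while this file expands it to `.quad` directives. A minimal userspace sketch of that X-macro technique (names are illustrative):

	#include <stdio.h>

	/* Stand-in for asm/unistd.h: one master list; each consumer
	 * redefines SYSCALL() to emit the representation it needs. */
	#define SYSCALL_LIST \
		SYSCALL(0, call_read) \
		SYSCALL(1, call_write)

	static void call_read(void)  { puts("read"); }
	static void call_write(void) { puts("write"); }

	/* Consumer: emit a table of function pointers. */
	#define SYSCALL(nr, sym) [nr] = sym,
	static void (*table[])(void) = { SYSCALL_LIST };
	#undef SYSCALL

	int main(void)
	{
		table[1]();	/* prints "write" */
		return 0;
	}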
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c
new file mode 100644
index 000000000000..967e92fdff01
--- /dev/null
+++ b/arch/arm64/kernel/sys_compat.c
@@ -0,0 +1,164 @@
+/*
+ * Based on arch/arm/kernel/sys_arm.c
+ *
+ * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
+ * Copyright (C) 1995, 1996 Russell King.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define __SYSCALL_COMPAT
+
+#include <linux/compat.h>
+#include <linux/personality.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/unistd.h>
+
+asmlinkage int compat_sys_fork(struct pt_regs *regs)
+{
+	return do_fork(SIGCHLD, regs->compat_sp, regs, 0, NULL, NULL);
+}
+
+asmlinkage int compat_sys_clone(unsigned long clone_flags, unsigned long newsp,
+				int __user *parent_tidptr, int tls_val,
+				int __user *child_tidptr, struct pt_regs *regs)
+{
+	if (!newsp)
+		newsp = regs->compat_sp;
+
+	return do_fork(clone_flags, newsp, regs, 0, parent_tidptr, child_tidptr);
+}
+
+asmlinkage int compat_sys_vfork(struct pt_regs *regs)
+{
+	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->compat_sp,
+		       regs, 0, NULL, NULL);
+}
+
+asmlinkage int compat_sys_execve(const char __user *filenamei,
+				 compat_uptr_t argv, compat_uptr_t envp,
+				 struct pt_regs *regs)
+{
+	int error;
+	char *filename;
+
+	filename = getname(filenamei);
+	error = PTR_ERR(filename);
+	if (IS_ERR(filename))
+		goto out;
+	error = compat_do_execve(filename, compat_ptr(argv), compat_ptr(envp),
+				 regs);
+	putname(filename);
+out:
+	return error;
+}
+
+asmlinkage int compat_sys_sched_rr_get_interval(compat_pid_t pid,
+						struct compat_timespec __user *interval)
+{
+	struct timespec t;
+	int ret;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(KERNEL_DS);
+	ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
+	set_fs(old_fs);
+	if (put_compat_timespec(&t, interval))
+		return -EFAULT;
+	return ret;
+}
+
+asmlinkage int compat_sys_sendfile(int out_fd, int in_fd,
+				   compat_off_t __user *offset, s32 count)
+{
+	mm_segment_t old_fs = get_fs();
+	int ret;
+	off_t of;
+
+	if (offset && get_user(of, offset))
+		return -EFAULT;
+
+	set_fs(KERNEL_DS);
+	ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
+			   count);
+	set_fs(old_fs);
+
+	if (offset && put_user(of, offset))
+		return -EFAULT;
+	return ret;
+}
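Editor's note: the two helpers above share the standard compat shape: copy the 32-bit userspace object into a native-width temporary, call the native syscall on the temporary (widening the address limit with set_fs(KERNEL_DS) so a kernel pointer is accepted), then translate the result back. A plain-C sketch of just the widen/narrow round trip (the set_fs() part has no userspace equivalent; names are illustrative):

	#include <stdio.h>
	#include <stdint.h>

	/* The native interface operates on the wide 64-bit type... */
	static int native_op(int64_t *val)
	{
		*val |= INT64_C(1) << 32;
		return 0;
	}

	/* ...and the compat shim round-trips through a wide temporary,
	 * the same shape as compat_sys_sendfile()'s off_t handling. */
	static int compat_op(int32_t *val32)
	{
		int64_t wide = *val32;
		int ret = native_op(&wide);

		*val32 = (int32_t)wide;	/* truncates, as a compat ABI must */
		return ret;
	}

	int main(void)
	{
		int32_t v = 5;
		compat_op(&v);
		printf("%d\n", v);	/* 5: the high bits were cut off */
		return 0;
	}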
+
+static inline void
+do_compat_cache_op(unsigned long start, unsigned long end, int flags)
+{
+	struct mm_struct *mm = current->active_mm;
+	struct vm_area_struct *vma;
+
+	if (end < start || flags)
+		return;
+
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, start);
+	if (vma && vma->vm_start < end) {
+		if (start < vma->vm_start)
+			start = vma->vm_start;
+		if (end > vma->vm_end)
+			end = vma->vm_end;
+		up_read(&mm->mmap_sem);
+		__flush_cache_user_range(start & PAGE_MASK, PAGE_ALIGN(end));
+		return;
+	}
+	up_read(&mm->mmap_sem);
+}
+
+/*
+ * Handle all unrecognised system calls.
+ */
+long compat_arm_syscall(struct pt_regs *regs)
+{
+	unsigned int no = regs->regs[7];
+
+	switch (no) {
+	/*
+	 * Flush a region from virtual address 'r0' to virtual address 'r1'
+	 * _exclusive_. There is no alignment requirement on either address;
+	 * user space does not need to know the hardware cache layout.
+	 *
+	 * r2 contains flags. It should ALWAYS be passed as ZERO until it
+	 * is defined to be something else. For now we ignore it, but may
+	 * the fires of hell burn in your belly if you break this rule. ;)
+	 *
+	 * (at a later date, we may want to allow this call to not flush
+	 * various aspects of the cache. Passing '0' will guarantee that
+	 * everything necessary gets flushed to maintain consistency in
+	 * the specified region).
+	 */
+	case __ARM_NR_compat_cacheflush:
+		do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);
+		return 0;
+
+	case __ARM_NR_compat_set_tls:
+		current->thread.tp_value = regs->regs[0];
+		asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0]));
+		return 0;
+
+	default:
+		return -ENOSYS;
+	}
+}
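Editor's note: a 32-bit process reaches the cacheflush case above through the ARM private syscall range, typically from a JIT that has just written instructions. A userspace-side sketch (not part of this patch; 0x0f0002 is assumed to be __ARM_NR_cacheflush, i.e. the private base 0x0f0000 plus 2 — real code should use the constant from <asm/unistd.h>, or simply GCC's __builtin___clear_cache()):

	#include <unistd.h>
	#include <sys/syscall.h>

	#define ARM_NR_cacheflush 0x0f0002	/* assumed value, see note */

	static char code[64];

	int main(void)
	{
		/* start, end (exclusive), flags — flags must be zero */
		syscall(ARM_NR_cacheflush, code, code + sizeof(code), 0);
		return 0;
	}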
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
new file mode 100644
index 000000000000..3b4b7258f492
--- /dev/null
+++ b/arch/arm64/kernel/time.c
@@ -0,0 +1,65 @@
+/*
+ * Based on arch/arm/kernel/time.c
+ *
+ * Copyright (C) 1991, 1992, 1995  Linus Torvalds
+ * Modifications for ARM (C) 1994-2001 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/timex.h>
+#include <linux/errno.h>
+#include <linux/profile.h>
+#include <linux/syscore_ops.h>
+#include <linux/timer.h>
+#include <linux/irq.h>
+
+#include <clocksource/arm_generic.h>
+
+#include <asm/thread_info.h>
+#include <asm/stacktrace.h>
+
+#ifdef CONFIG_SMP
+unsigned long profile_pc(struct pt_regs *regs)
+{
+	struct stackframe frame;
+
+	if (!in_lock_functions(regs->pc))
+		return regs->pc;
+
+	frame.fp = regs->regs[29];
+	frame.sp = regs->sp;
+	frame.pc = regs->pc;
+	do {
+		int ret = unwind_frame(&frame);
+		if (ret < 0)
+			return 0;
+	} while (in_lock_functions(frame.pc));
+
+	return frame.pc;
+}
+EXPORT_SYMBOL(profile_pc);
+#endif
+
+void __init time_init(void)
+{
+	arm_generic_timer_init();
+}
"(%08x) " : "%08x ", val); + else { + p += sprintf(p, "bad PC value"); + break; + } + } + printk("%sCode: %s\n", lvl, str); + + set_fs(fs); +} + +static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) +{ + struct stackframe frame; + const register unsigned long current_sp asm ("sp"); + + pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); + + if (!tsk) + tsk = current; + + if (regs) { + frame.fp = regs->regs[29]; + frame.sp = regs->sp; + frame.pc = regs->pc; + } else if (tsk == current) { + frame.fp = (unsigned long)__builtin_frame_address(0); + frame.sp = current_sp; + frame.pc = (unsigned long)dump_backtrace; + } else { + /* + * task blocked in __switch_to + */ + frame.fp = thread_saved_fp(tsk); + frame.sp = thread_saved_sp(tsk); + frame.pc = thread_saved_pc(tsk); + } + + printk("Call trace:\n"); + while (1) { + unsigned long where = frame.pc; + int ret; + + ret = unwind_frame(&frame); + if (ret < 0) + break; + dump_backtrace_entry(where, frame.sp); + } +} + +void dump_stack(void) +{ + dump_backtrace(NULL, NULL); +} + +EXPORT_SYMBOL(dump_stack); + +void show_stack(struct task_struct *tsk, unsigned long *sp) +{ + dump_backtrace(NULL, tsk); + barrier(); +} + +#ifdef CONFIG_PREEMPT +#define S_PREEMPT " PREEMPT" +#else +#define S_PREEMPT "" +#endif +#ifdef CONFIG_SMP +#define S_SMP " SMP" +#else +#define S_SMP "" +#endif + +static int __die(const char *str, int err, struct thread_info *thread, + struct pt_regs *regs) +{ + struct task_struct *tsk = thread->task; + static int die_counter; + int ret; + + pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", + str, err, ++die_counter); + + /* trap and error numbers are mostly meaningless on ARM */ + ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV); + if (ret == NOTIFY_STOP) + return ret; + + print_modules(); + __show_regs(regs); + pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n", + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1); + + if (!user_mode(regs) || in_interrupt()) { + dump_mem(KERN_EMERG, "Stack: ", regs->sp, + THREAD_SIZE + (unsigned long)task_stack_page(tsk)); + dump_backtrace(regs, tsk); + dump_instr(KERN_EMERG, regs); + } + + return ret; +} + +static DEFINE_RAW_SPINLOCK(die_lock); + +/* + * This function is protected against re-entrancy. 
+
+#ifdef CONFIG_PREEMPT
+#define S_PREEMPT " PREEMPT"
+#else
+#define S_PREEMPT ""
+#endif
+#ifdef CONFIG_SMP
+#define S_SMP " SMP"
+#else
+#define S_SMP ""
+#endif
+
+static int __die(const char *str, int err, struct thread_info *thread,
+		 struct pt_regs *regs)
+{
+	struct task_struct *tsk = thread->task;
+	static int die_counter;
+	int ret;
+
+	pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
+		 str, err, ++die_counter);
+
+	/* trap and error numbers are mostly meaningless on ARM */
+	ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
+	if (ret == NOTIFY_STOP)
+		return ret;
+
+	print_modules();
+	__show_regs(regs);
+	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
+		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
+
+	if (!user_mode(regs) || in_interrupt()) {
+		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
+			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+		dump_backtrace(regs, tsk);
+		dump_instr(KERN_EMERG, regs);
+	}
+
+	return ret;
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+/*
+ * This function is protected against re-entrancy.
+ */
+void die(const char *str, struct pt_regs *regs, int err)
+{
+	struct thread_info *thread = current_thread_info();
+	int ret;
+
+	oops_enter();
+
+	raw_spin_lock_irq(&die_lock);
+	console_verbose();
+	bust_spinlocks(1);
+	ret = __die(str, err, thread, regs);
+
+	if (regs && kexec_should_crash(thread->task))
+		crash_kexec(regs);
+
+	bust_spinlocks(0);
+	add_taint(TAINT_DIE);
+	raw_spin_unlock_irq(&die_lock);
+	oops_exit();
+
+	if (in_interrupt())
+		panic("Fatal exception in interrupt");
+	if (panic_on_oops)
+		panic("Fatal exception");
+	if (ret != NOTIFY_STOP)
+		do_exit(SIGSEGV);
+}
+
+void arm64_notify_die(const char *str, struct pt_regs *regs,
+		      struct siginfo *info, int err)
+{
+	if (user_mode(regs))
+		force_sig_info(info->si_signo, info, current);
+	else
+		die(str, regs, err);
+}
+
+asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+{
+	siginfo_t info;
+	void __user *pc = (void __user *)instruction_pointer(regs);
+
+#ifdef CONFIG_COMPAT
+	/* check for AArch32 breakpoint instructions */
+	if (compat_user_mode(regs) && aarch32_break_trap(regs) == 0)
+		return;
+#endif
+
+	if (show_unhandled_signals) {
+		pr_info("%s[%d]: undefined instruction: pc=%p\n",
+			current->comm, task_pid_nr(current), pc);
+		dump_instr(KERN_INFO, regs);
+	}
+
+	info.si_signo = SIGILL;
+	info.si_errno = 0;
+	info.si_code  = ILL_ILLOPC;
+	info.si_addr  = pc;
+
+	arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
+}
+
+long compat_arm_syscall(struct pt_regs *regs);
+
+asmlinkage long do_ni_syscall(struct pt_regs *regs)
+{
+#ifdef CONFIG_COMPAT
+	long ret;
+	if (is_compat_task()) {
+		ret = compat_arm_syscall(regs);
+		if (ret != -ENOSYS)
+			return ret;
+	}
+#endif
+
+	if (show_unhandled_signals) {
+		pr_info("%s[%d]: syscall %d\n", current->comm,
+			task_pid_nr(current), (int)regs->syscallno);
+		dump_instr("", regs);
+		if (user_mode(regs))
+			__show_regs(regs);
+	}
+
+	return sys_ni_syscall();
+}
+
+/*
+ * bad_mode handles the impossible case in the exception vector.
+ */
+asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
+{
+	console_verbose();
+
+	pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
+		handler[reason], esr);
+
+	die("Oops - bad mode", regs, 0);
+	local_irq_disable();
+	panic("bad mode");
+}
+
+void __pte_error(const char *file, int line, unsigned long val)
+{
+	printk("%s:%d: bad pte %016lx.\n", file, line, val);
+}
+
+void __pmd_error(const char *file, int line, unsigned long val)
+{
+	printk("%s:%d: bad pmd %016lx.\n", file, line, val);
+}
+
+void __pgd_error(const char *file, int line, unsigned long val)
+{
+	printk("%s:%d: bad pgd %016lx.\n", file, line, val);
+}
+
+void __init trap_init(void)
+{
+	return;
+}
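Editor's note: __die() broadcasts the oops through the kdebug notifier chain (notify_die(DIE_OOPS, ...)) before printing, so other kernel code can observe or veto it. A sketch of a consumer using the standard register_die_notifier() interface (the handler body is illustrative):

	#include <linux/kdebug.h>
	#include <linux/module.h>
	#include <linux/notifier.h>

	static int my_die_handler(struct notifier_block *nb, unsigned long val,
				  void *data)
	{
		struct die_args *args = data;

		pr_alert("die event: %s (err %ld)\n", args->str, args->err);
		return NOTIFY_DONE;	/* let the oops continue normally */
	}

	static struct notifier_block my_die_nb = {
		.notifier_call = my_die_handler,
	};

	static int __init my_init(void)
	{
		return register_die_notifier(&my_die_nb);
	}

	static void __exit my_exit(void)
	{
		unregister_die_notifier(&my_die_nb);
	}

	module_init(my_init);
	module_exit(my_exit);
	MODULE_LICENSE("GPL");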
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
new file mode 100644
index 000000000000..17948fc7d663
--- /dev/null
+++ b/arch/arm64/kernel/vdso.c
@@ -0,0 +1,261 @@
+/*
+ * VDSO implementation for AArch64 and vector page setup for AArch32.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/cacheflush.h>
+#include <asm/signal32.h>
+#include <asm/vdso.h>
+#include <asm/vdso_datapage.h>
+
+extern char vdso_start, vdso_end;
+static unsigned long vdso_pages;
+static struct page **vdso_pagelist;
+
+/*
+ * The vDSO data page.
+ */
+static union {
+	struct vdso_data	data;
+	u8			page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
+
+#ifdef CONFIG_COMPAT
+/*
+ * Create and map the vectors page for AArch32 tasks.
+ */
+static struct page *vectors_page[1];
+
+static int alloc_vectors_page(void)
+{
+	extern char __kuser_helper_start[], __kuser_helper_end[];
+	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
+	unsigned long vpage;
+
+	vpage = get_zeroed_page(GFP_ATOMIC);
+
+	if (!vpage)
+		return -ENOMEM;
+
+	/* kuser helpers */
+	memcpy((void *)vpage + 0x1000 - kuser_sz, __kuser_helper_start,
+		kuser_sz);
+
+	/* sigreturn code */
+	memcpy((void *)vpage + AARCH32_KERN_SIGRET_CODE_OFFSET,
+		aarch32_sigret_code, sizeof(aarch32_sigret_code));
+
+	flush_icache_range(vpage, vpage + PAGE_SIZE);
+	vectors_page[0] = virt_to_page(vpage);
+
+	return 0;
+}
+arch_initcall(alloc_vectors_page);
+
+int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long addr = AARCH32_VECTORS_BASE;
+	int ret;
+
+	down_write(&mm->mmap_sem);
+	current->mm->context.vdso = (void *)addr;
+
+	/* Map vectors page at the high address. */
+	ret = install_special_mapping(mm, addr, PAGE_SIZE,
+				      VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC,
+				      vectors_page);
+
+	up_write(&mm->mmap_sem);
+
+	return ret;
+}
+#endif /* CONFIG_COMPAT */
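Editor's note: userspace never hard-codes the vDSO's address; the kernel advertises it through the AT_SYSINFO_EHDR aux-vector entry, and the mapping appears as [vdso] in /proc/self/maps (see arch_vma_name() later in this file). A small userspace sketch (glibc >= 2.16 for getauxval):

	#include <stdio.h>
	#include <sys/auxv.h>
	#include <elf.h>

	int main(void)
	{
		Elf64_Ehdr *vdso = (Elf64_Ehdr *)getauxval(AT_SYSINFO_EHDR);

		if (!vdso)
			return 1;
		/* prints the load address and the "ELF" magic bytes */
		printf("vDSO at %p, magic %.3s\n", (void *)vdso,
		       (char *)vdso->e_ident + 1);
		return 0;
	}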
+
+static int __init vdso_init(void)
+{
+	struct page *pg;
+	char *vbase;
+	int i, ret = 0;
+
+	vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT;
+	pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n",
+		vdso_pages + 1, vdso_pages, 1L, &vdso_start);
+
+	/* Allocate the vDSO pagelist, plus a page for the data. */
+	vdso_pagelist = kzalloc(sizeof(struct page *) * (vdso_pages + 1),
+				GFP_KERNEL);
+	if (vdso_pagelist == NULL) {
+		pr_err("Failed to allocate vDSO pagelist!\n");
+		return -ENOMEM;
+	}
+
+	/* Grab the vDSO code pages. */
+	for (i = 0; i < vdso_pages; i++) {
+		pg = virt_to_page(&vdso_start + i*PAGE_SIZE);
+		ClearPageReserved(pg);
+		get_page(pg);
+		vdso_pagelist[i] = pg;
+	}
+
+	/* Sanity check the shared object header. */
+	vbase = vmap(vdso_pagelist, 1, 0, PAGE_KERNEL);
+	if (vbase == NULL) {
+		pr_err("Failed to map vDSO pagelist!\n");
+		return -ENOMEM;
+	} else if (memcmp(vbase, "\177ELF", 4)) {
+		pr_err("vDSO is not a valid ELF object!\n");
+		ret = -EINVAL;
+		goto unmap;
+	}
+
+	/* Grab the vDSO data page. */
+	pg = virt_to_page(vdso_data);
+	get_page(pg);
+	vdso_pagelist[i] = pg;
+
+unmap:
+	vunmap(vbase);
+	return ret;
+}
+arch_initcall(vdso_init);
+
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+				int uses_interp)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned long vdso_base, vdso_mapping_len;
+	int ret;
+
+	/* Be sure to map the data page */
+	vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT;
+
+	down_write(&mm->mmap_sem);
+	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
+	if (IS_ERR_VALUE(vdso_base)) {
+		ret = vdso_base;
+		goto up_fail;
+	}
+	mm->context.vdso = (void *)vdso_base;
+
+	ret = install_special_mapping(mm, vdso_base, vdso_mapping_len,
+				      VM_READ|VM_EXEC|
+				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+				      vdso_pagelist);
+	if (ret) {
+		mm->context.vdso = NULL;
+		goto up_fail;
+	}
+
+up_fail:
+	up_write(&mm->mmap_sem);
+
+	return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+	/*
+	 * We can re-use the vdso pointer in mm_context_t for identifying
+	 * the vectors page for compat applications. The vDSO will always
+	 * sit above TASK_UNMAPPED_BASE and so we don't need to worry about
+	 * it conflicting with the vectors base.
+	 */
+	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) {
+#ifdef CONFIG_COMPAT
+		if (vma->vm_start == AARCH32_VECTORS_BASE)
+			return "[vectors]";
+#endif
+		return "[vdso]";
+	}
+
+	return NULL;
+}
+
+/*
+ * We define AT_SYSINFO_EHDR, so we need these function stubs to keep
+ * Linux happy.
+ */
+int in_gate_area_no_mm(unsigned long addr)
+{
+	return 0;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+	return 0;
+}
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+	return NULL;
+}
+
+/*
+ * Update the vDSO data page to keep in sync with kernel timekeeping.
+ */
+void update_vsyscall(struct timespec *ts, struct timespec *wtm,
+		     struct clocksource *clock, u32 mult)
+{
+	struct timespec xtime_coarse;
+	u32 use_syscall = strcmp(clock->name, "arch_sys_counter");
+
+	++vdso_data->tb_seq_count;
+	smp_wmb();
+
+	xtime_coarse = __current_kernel_time();
+	vdso_data->use_syscall			= use_syscall;
+	vdso_data->xtime_coarse_sec		= xtime_coarse.tv_sec;
+	vdso_data->xtime_coarse_nsec		= xtime_coarse.tv_nsec;
+
+	if (!use_syscall) {
+		vdso_data->cs_cycle_last	= clock->cycle_last;
+		vdso_data->xtime_clock_sec	= ts->tv_sec;
+		vdso_data->xtime_clock_nsec	= ts->tv_nsec;
+		vdso_data->cs_mult		= mult;
+		vdso_data->cs_shift		= clock->shift;
+		vdso_data->wtm_clock_sec	= wtm->tv_sec;
+		vdso_data->wtm_clock_nsec	= wtm->tv_nsec;
+	}
+
+	smp_wmb();
+	++vdso_data->tb_seq_count;
+}
+
+void update_vsyscall_tz(void)
+{
+	++vdso_data->tb_seq_count;
+	smp_wmb();
+	vdso_data->tz_minuteswest	= sys_tz.tz_minuteswest;
+	vdso_data->tz_dsttime		= sys_tz.tz_dsttime;
+	smp_wmb();
+	++vdso_data->tb_seq_count;
+}
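Editor's note: update_vsyscall() above is the writer side of a seqlock-style protocol: tb_seq_count is bumped to an odd value before the update and back to even after it, so lock-free readers can detect a torn read and retry. The vDSO implements the reader side in assembly (the seqcnt_acquire/seqcnt_check macros in gettimeofday.S below); a C model of the same reader logic:

	#include <stdatomic.h>
	#include <stdint.h>

	struct vdso_sample {
		atomic_uint seq;	/* odd while the writer is mid-update */
		uint64_t sec;
		uint64_t nsec;
	};

	/* Lock-free reader matching the tb_seq_count protocol: retry while
	 * the counter is odd or changed underneath us. A sketch — the real
	 * vDSO does this in assembly with explicit barriers (dmb ishld). */
	static void read_sample(struct vdso_sample *s, uint64_t *sec,
				uint64_t *nsec)
	{
		unsigned int start;

		do {
			while ((start = atomic_load(&s->seq)) & 1)
				;	/* writer active, spin */
			*sec  = s->sec;
			*nsec = s->nsec;
		} while (atomic_load(&s->seq) != start);
	}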
diff --git a/arch/arm64/kernel/vdso/.gitignore b/arch/arm64/kernel/vdso/.gitignore
new file mode 100644
index 000000000000..b8cc94e9698b
--- /dev/null
+++ b/arch/arm64/kernel/vdso/.gitignore
@@ -0,0 +1,2 @@
+vdso.lds
+vdso-offsets.h
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
new file mode 100644
index 000000000000..d8064af42e62
--- /dev/null
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -0,0 +1,63 @@
+#
+# Building a vDSO image for AArch64.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+# Heavily based on the vDSO Makefiles for other archs.
+#
+
+obj-vdso := gettimeofday.o note.o sigreturn.o
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+ccflags-y := -shared -fno-common -fno-builtin
+ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
+		$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+
+obj-y += vdso.o
+extra-y += vdso.lds vdso-offsets.h
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+
+# Force dependency (incbin is bad)
+$(obj)/vdso.o : $(obj)/vdso.so
+
+# Link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso)
+	$(call if_changed,vdsold)
+
+# Strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+	$(call if_changed,objcopy)
+
+# Generate VDSO offsets using helper script
+gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
+quiet_cmd_vdsosym = VDSOSYM $@
+define cmd_vdsosym
+	$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ && \
+	cp $@ include/generated/
+endef
+
+$(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
+	$(call if_changed,vdsosym)
+
+# Assembly rules for the .S files
+$(obj-vdso): %.o: %.S
+	$(call if_changed_dep,vdsoas)
+
+# Actual build commands
+quiet_cmd_vdsold = VDSOL $@
+      cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+quiet_cmd_vdsoas = VDSOA $@
+      cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
+
+# Install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+      cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+	@mkdir -p $(MODLIB)/vdso
+	$(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/arm64/kernel/vdso/gen_vdso_offsets.sh b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
new file mode 100755
index 000000000000..01924ff071ad
--- /dev/null
+++ b/arch/arm64/kernel/vdso/gen_vdso_offsets.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+#
+# Match symbols in the DSO that look like VDSO_*; produce a header file
+# of constant offsets into the shared object.
+#
+# Doing this inside the Makefile will break the $(filter-out) function,
+# causing Kbuild to rebuild the vdso-offsets header file every time.
+#
+# Author: Will Deacon <will.deacon@arm.com>
+#
+
+LC_ALL=C
+sed -n -e 's/^00*/0/' -e \
+'s/^\([0-9a-fA-F]*\) . VDSO_\([a-zA-Z0-9_]*\)$/\#define vdso_offset_\2\t0x\1/p'
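Editor's note: the script above filters `nm` output for symbols named VDSO_* and rewrites each into a header constant. An illustrative transformation (the offset value is made up):

	$ nm vdso.so.dbg | grep VDSO_
	0000000000000420 T VDSO_sigtramp

	# after piping through gen_vdso_offsets.sh:
	#define vdso_offset_sigtramp	0x420

The kernel then uses vdso_offset_sigtramp (via the generated vdso-offsets.h) to point signal frames at the trampoline without hard-coding an address.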
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
new file mode 100644
index 000000000000..dcb8c203a3b2
--- /dev/null
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -0,0 +1,242 @@
+/*
+ * Userspace implementations of gettimeofday() and friends.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/unistd.h>
+
+#define NSEC_PER_SEC_LO16	0xca00
+#define NSEC_PER_SEC_HI16	0x3b9a
+
+vdso_data	.req	x6
+use_syscall	.req	w7
+seqcnt		.req	w8
+
+	.macro	seqcnt_acquire
+9999:	ldr	seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
+	tbnz	seqcnt, #0, 9999b
+	dmb	ishld
+	ldr	use_syscall, [vdso_data, #VDSO_USE_SYSCALL]
+	.endm
+
+	.macro	seqcnt_read, cnt
+	dmb	ishld
+	ldr	\cnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
+	.endm
+
+	.macro	seqcnt_check, cnt, fail
+	cmp	\cnt, seqcnt
+	b.ne	\fail
+	.endm
+
+	.text
+
+/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
+ENTRY(__kernel_gettimeofday)
+	.cfi_startproc
+	mov	x2, x30
+	.cfi_register x30, x2
+
+	/* Acquire the sequence counter and get the timespec. */
+	adr	vdso_data, _vdso_data
+1:	seqcnt_acquire
+	cbnz	use_syscall, 4f
+
+	/* If tv is NULL, skip to the timezone code. */
+	cbz	x0, 2f
+	bl	__do_get_tspec
+	seqcnt_check w13, 1b
+
+	/* Convert ns to us. */
+	mov	x11, #1000
+	udiv	x10, x10, x11
+	stp	x9, x10, [x0, #TVAL_TV_SEC]
+2:
+	/* If tz is NULL, return 0. */
+	cbz	x1, 3f
+	ldp	w4, w5, [vdso_data, #VDSO_TZ_MINWEST]
+	seqcnt_read w13
+	seqcnt_check w13, 1b
+	stp	w4, w5, [x1, #TZ_MINWEST]
+3:
+	mov	x0, xzr
+	ret	x2
+4:
+	/* Syscall fallback. */
+	mov	x8, #__NR_gettimeofday
+	svc	#0
+	ret	x2
+	.cfi_endproc
+ENDPROC(__kernel_gettimeofday)
+
+/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
+ENTRY(__kernel_clock_gettime)
+	.cfi_startproc
+	cmp	w0, #CLOCK_REALTIME
+	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
+	b.ne	2f
+
+	mov	x2, x30
+	.cfi_register x30, x2
+
+	/* Get kernel timespec. */
+	adr	vdso_data, _vdso_data
+1:	seqcnt_acquire
+	cbnz	use_syscall, 7f
+
+	bl	__do_get_tspec
+	seqcnt_check w13, 1b
+
+	cmp	w0, #CLOCK_MONOTONIC
+	b.ne	6f
+
+	/* Get wtm timespec. */
+	ldp	x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC]
+
+	/* Check the sequence counter. */
+	seqcnt_read w13
+	seqcnt_check w13, 1b
+	b	4f
+2:
+	cmp	w0, #CLOCK_REALTIME_COARSE
+	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+	b.ne	8f
+
+	/* Get coarse timespec. */
+	adr	vdso_data, _vdso_data
+3:	seqcnt_acquire
+	ldp	x9, x10, [vdso_data, #VDSO_XTIME_CRS_SEC]
+
+	cmp	w0, #CLOCK_MONOTONIC_COARSE
+	b.ne	6f
+
+	/* Get wtm timespec. */
+	ldp	x14, x15, [vdso_data, #VDSO_WTM_CLK_SEC]
+
+	/* Check the sequence counter. */
+	seqcnt_read w13
+	seqcnt_check w13, 3b
+4:
+	/* Add on wtm timespec. */
+	add	x9, x9, x14
+	add	x10, x10, x15
+
+	/* Normalise the new timespec. */
+	mov	x14, #NSEC_PER_SEC_LO16
+	movk	x14, #NSEC_PER_SEC_HI16, lsl #16
+	cmp	x10, x14
+	b.lt	5f
+	sub	x10, x10, x14
+	add	x9, x9, #1
+5:
+	cmp	x10, #0
+	b.ge	6f
+	add	x10, x10, x14
+	sub	x9, x9, #1
+
+6:	/* Store to the user timespec. */
+	stp	x9, x10, [x1, #TSPEC_TV_SEC]
+	mov	x0, xzr
+	ret	x2
+7:
+	mov	x30, x2
+8:	/* Syscall fallback. */
+	mov	x8, #__NR_clock_gettime
+	svc	#0
+	ret
+	.cfi_endproc
+ENDPROC(__kernel_clock_gettime)
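Editor's note: the fast paths above avoid division by converting the counter delta to nanoseconds with a precomputed multiply-and-shift, ns = (delta * mult) >> shift, implemented in assembly by __do_get_tspec below. A C model of that conversion (mult/shift values here are illustrative — a 50 MHz counter, 20 ns per tick):

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	static void cycles_to_ts(uint64_t delta, uint32_t mult, uint32_t shift,
				 uint64_t base_sec, uint64_t base_nsec,
				 uint64_t *sec, uint64_t *nsec)
	{
		/* mult/shift are precomputed by the clocksource core so the
		 * hot path needs no division for the nanosecond part */
		uint64_t ns = base_nsec + ((delta * mult) >> shift);

		*sec  = base_sec + ns / NSEC_PER_SEC;
		*nsec = ns % NSEC_PER_SEC;
	}

	int main(void)
	{
		uint64_t s, n;

		/* mult = 20 << 8, shift = 8  ==>  20 ns per counter tick */
		cycles_to_ts(50000000, 20 << 8, 8, 0, 0, &s, &n);
		printf("%llu.%09llu\n", (unsigned long long)s,
		       (unsigned long long)n);	/* 1.000000000 */
		return 0;
	}

The multiply can overflow for very large deltas, which is one reason the assembly masks the delta to 56 bits of guaranteed precision.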
+
+/* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */
+ENTRY(__kernel_clock_getres)
+	.cfi_startproc
+	cbz	w1, 3f
+
+	cmp	w0, #CLOCK_REALTIME
+	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
+	b.ne	1f
+
+	ldr	x2, 5f
+	b	2f
+1:
+	cmp	w0, #CLOCK_REALTIME_COARSE
+	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
+	b.ne	4f
+	ldr	x2, 6f
+2:
+	stp	xzr, x2, [x1]
+
+3:	/* res == NULL. */
+	mov	w0, wzr
+	ret
+
+4:	/* Syscall fallback. */
+	mov	x8, #__NR_clock_getres
+	svc	#0
+	ret
+5:
+	.quad	CLOCK_REALTIME_RES
+6:
+	.quad	CLOCK_COARSE_RES
+	.cfi_endproc
+ENDPROC(__kernel_clock_getres)
+
+/*
+ * Read the current time from the architected counter.
+ * Expects vdso_data to be initialised.
+ * Clobbers the temporary registers (x9 - x15).
+ * Returns:
+ *	- (x9, x10) = (ts->tv_sec, ts->tv_nsec)
+ *	- (x11, x12) = (xtime->tv_sec, xtime->tv_nsec)
+ *	- w13 = vDSO sequence counter
+ */
+ENTRY(__do_get_tspec)
+	.cfi_startproc
+
+	/* Read from the vDSO data page. */
+	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
+	ldp	x11, x12, [vdso_data, #VDSO_XTIME_CLK_SEC]
+	ldp	w14, w15, [vdso_data, #VDSO_CS_MULT]
+	seqcnt_read w13
+
+	/* Read the physical counter. */
+	isb
+	mrs	x9, cntpct_el0
+
+	/* Calculate cycle delta and convert to ns. */
+	sub	x10, x9, x10
+	/* We can only guarantee 56 bits of precision. */
+	movn	x9, #0xff0, lsl #48
+	and	x10, x9, x10
+	mul	x10, x10, x14
+	lsr	x10, x10, x15
+
+	/* Use the kernel time to calculate the new timespec. */
+	add	x10, x12, x10
+	mov	x14, #NSEC_PER_SEC_LO16
+	movk	x14, #NSEC_PER_SEC_HI16, lsl #16
+	udiv	x15, x10, x14
+	add	x9, x15, x11
+	mul	x14, x14, x15
+	sub	x10, x10, x14
+
+	ret
+	.cfi_endproc
+ENDPROC(__do_get_tspec)
diff --git a/arch/arm64/kernel/vdso/note.S b/arch/arm64/kernel/vdso/note.S
new file mode 100644
index 000000000000..b82c85e5d972
--- /dev/null
+++ b/arch/arm64/kernel/vdso/note.S
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ *
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+	.long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/arm64/kernel/vdso/sigreturn.S b/arch/arm64/kernel/vdso/sigreturn.S
new file mode 100644
index 000000000000..20d98effa7dd
--- /dev/null
+++ b/arch/arm64/kernel/vdso/sigreturn.S
@@ -0,0 +1,37 @@
+/*
+ * Sigreturn trampoline for returning from a signal when the SA_RESTORER
+ * flag is not set.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+	.text
+
+	nop
+ENTRY(__kernel_rt_sigreturn)
+	.cfi_startproc
+	.cfi_signal_frame
+	.cfi_def_cfa	x29, 0
+	.cfi_offset	x29, 0 * 8
+	.cfi_offset	x30, 1 * 8
+	mov	x8, #__NR_rt_sigreturn
+	svc	#0
+	.cfi_endproc
+ENDPROC(__kernel_rt_sigreturn)
diff --git a/arch/arm64/kernel/vdso/vdso.S b/arch/arm64/kernel/vdso/vdso.S
new file mode 100644
index 000000000000..60c1db54b41a
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vdso.S
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/page.h>
+
+	__PAGE_ALIGNED_DATA
+
+	.globl vdso_start, vdso_end
+	.balign PAGE_SIZE
+vdso_start:
+	.incbin "arch/arm64/kernel/vdso/vdso.so"
+	.balign PAGE_SIZE
+vdso_end:
+
+	.previous
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000000..8154b8d1c826
--- /dev/null
+++ b/arch/arm64/kernel/vdso/vdso.lds.S
@@ -0,0 +1,100 @@
+/*
+ * GNU linker script for the VDSO library.
+ *
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Author: Will Deacon <will.deacon@arm.com>
+ * Heavily based on the vDSO linker scripts for other archs.
+ */
+
+#include <linux/const.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+
+OUTPUT_FORMAT("elf64-littleaarch64", "elf64-bigaarch64", "elf64-littleaarch64")
+OUTPUT_ARCH(aarch64)
+
+SECTIONS
+{
+	. = VDSO_LBASE + SIZEOF_HEADERS;
+
+	.hash		: { *(.hash) }			:text
+	.gnu.hash	: { *(.gnu.hash) }
+	.dynsym		: { *(.dynsym) }
+	.dynstr		: { *(.dynstr) }
+	.gnu.version	: { *(.gnu.version) }
+	.gnu.version_d	: { *(.gnu.version_d) }
+	.gnu.version_r	: { *(.gnu.version_r) }
+
+	.note		: { *(.note.*) }		:text	:note
+
+	. = ALIGN(16);
+
+	.text		: { *(.text*) }			:text	=0xd503201f
+	PROVIDE (__etext = .);
+	PROVIDE (_etext = .);
+	PROVIDE (etext = .);
+
+	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
+	.eh_frame	: { KEEP (*(.eh_frame)) }	:text
+
+	.dynamic	: { *(.dynamic) }		:text	:dynamic
+
+	.rodata		: { *(.rodata*) }		:text
+
+	_end = .;
+	PROVIDE(end = .);
+
+	. = ALIGN(PAGE_SIZE);
+	PROVIDE(_vdso_data = .);
+
+	/DISCARD/	: {
+		*(.note.GNU-stack)
+		*(.data .data.* .gnu.linkonce.d.* .sdata*)
+		*(.bss .sbss .dynbss .dynsbss)
+	}
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS;	/* PF_R|PF_X */
+	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
+	note		PT_NOTE		FLAGS(4);		/* PF_R */
+	eh_frame_hdr	PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+	LINUX_2.6.39 {
+	global:
+		__kernel_rt_sigreturn;
+		__kernel_gettimeofday;
+		__kernel_clock_gettime;
+		__kernel_clock_getres;
+	local: *;
+	};
+}
+
+/*
+ * Make the sigreturn code visible to the kernel.
+ */
+VDSO_sigtramp		= __kernel_rt_sigreturn;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
new file mode 100644
index 000000000000..3fae2be8b016
--- /dev/null
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -0,0 +1,126 @@
+/*
+ * ld script to make ARM Linux kernel
+ * taken from the i386 version by Russell King
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
+ */
+
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+#include <asm/memory.h>
+#include <asm/page.h>
+
+#define ARM_EXIT_KEEP(x)
+#define ARM_EXIT_DISCARD(x)	x
+
+OUTPUT_ARCH(aarch64)
+ENTRY(stext)
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+	/*
+	 * XXX: The linker does not define how output sections are
+	 * assigned to input sections when there are multiple statements
+	 * matching the same input section name. There is no documented
+	 * order of matching.
+	 */
+	/DISCARD/ : {
+		ARM_EXIT_DISCARD(EXIT_TEXT)
+		ARM_EXIT_DISCARD(EXIT_DATA)
+		EXIT_CALL
+		*(.discard)
+		*(.discard.*)
+	}
+
+	. = PAGE_OFFSET + TEXT_OFFSET;
+
+	.head.text : {
+		_text = .;
+		HEAD_TEXT
+	}
+	.text : {			/* Real text segment		*/
+		_stext = .;		/* Text and read-only data	*/
+			*(.smp.pen.text)
+			__exception_text_start = .;
+			*(.exception.text)
+			__exception_text_end = .;
+			IRQENTRY_TEXT
+			TEXT_TEXT
+			SCHED_TEXT
+			LOCK_TEXT
+			*(.fixup)
+			*(.gnu.warning)
+		. = ALIGN(16);
+		*(.got)			/* Global offset table		*/
+	}
+
+	RO_DATA(PAGE_SIZE)
+
+	_etext = .;			/* End of text and rodata section */
+
+	. = ALIGN(PAGE_SIZE);
+	__init_begin = .;
+
+	INIT_TEXT_SECTION(8)
+	.exit.text : {
+		ARM_EXIT_KEEP(EXIT_TEXT)
+	}
+	. = ALIGN(16);
+	.init.data : {
+		INIT_DATA
+		INIT_SETUP(16)
+		INIT_CALLS
+		CON_INITCALL
+		SECURITY_INITCALL
+		INIT_RAM_FS
+	}
+	.exit.data : {
+		ARM_EXIT_KEEP(EXIT_DATA)
+	}
+
+	PERCPU_SECTION(64)
+
+	__init_end = .;
+	. = ALIGN(THREAD_SIZE);
+	__data_loc = .;
+
+	.data : AT(__data_loc) {
+		_data = .;		/* address in memory */
+		_sdata = .;
+
+		/*
+		 * first, the init task union, aligned
+		 * to an 8192 byte boundary.
+		 */
+		INIT_TASK_DATA(THREAD_SIZE)
+		NOSAVE_DATA
+		CACHELINE_ALIGNED_DATA(64)
+		READ_MOSTLY_DATA(64)
+
+		/*
+		 * The exception fixup table (might need resorting at runtime)
+		 */
+		. = ALIGN(32);
+		__start___ex_table = .;
+		*(__ex_table)
+		__stop___ex_table = .;
+
+		/*
+		 * and the usual data section
+		 */
+		DATA_DATA
+		CONSTRUCTORS
+
+		_edata = .;
+	}
+	_edata_loc = __data_loc + SIZEOF(.data);
+
+	NOTES
+
+	BSS_SECTION(0, 0, 0)
+	_end = .;
+
+	STABS_DEBUG
+	.comment 0 : { *(.comment) }
+}