Diffstat (limited to 'arch/arm64/kernel')
27 files changed, 502 insertions, 352 deletions
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index bef04afd6031..b12e15b80516 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -12,11 +12,12 @@ CFLAGS_REMOVE_insn.o = -pg CFLAGS_REMOVE_return_address.o = -pg # Object file lists. -arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ +arm64-obj-y := debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ - cpuinfo.o cpu_errata.o alternative.o cacheinfo.o + hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ + return_address.o cpuinfo.o cpu_errata.o \ + cpufeature.o alternative.o cacheinfo.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o entry32.o \ diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c index ad7821d64a1d..21033bba9390 100644 --- a/arch/arm64/kernel/alternative.c +++ b/arch/arm64/kernel/alternative.c @@ -24,6 +24,7 @@ #include <asm/cacheflush.h> #include <asm/alternative.h> #include <asm/cpufeature.h> +#include <asm/insn.h> #include <linux/stop_machine.h> extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; @@ -33,6 +34,48 @@ struct alt_region { struct alt_instr *end; }; +/* + * Decode the imm field of a b/bl instruction, and return the byte + * offset as a signed value (so it can be used when computing a new + * branch target). + */ +static s32 get_branch_offset(u32 insn) +{ + s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn); + + /* sign-extend the immediate before turning it into a byte offset */ + return (imm << 6) >> 4; +} + +static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr) +{ + u32 insn; + + aarch64_insn_read(altinsnptr, &insn); + + /* Stop the world on instructions we don't support... */ + BUG_ON(aarch64_insn_is_cbz(insn)); + BUG_ON(aarch64_insn_is_cbnz(insn)); + BUG_ON(aarch64_insn_is_bcond(insn)); + /* ... and there is probably more. 
*/ + + if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) { + enum aarch64_insn_branch_type type; + unsigned long target; + + if (aarch64_insn_is_b(insn)) + type = AARCH64_INSN_BRANCH_NOLINK; + else + type = AARCH64_INSN_BRANCH_LINK; + + target = (unsigned long)altinsnptr + get_branch_offset(insn); + insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr, + target, type); + } + + return insn; +} + static int __apply_alternatives(void *alt_region) { struct alt_instr *alt; @@ -40,16 +83,24 @@ static int __apply_alternatives(void *alt_region) u8 *origptr, *replptr; for (alt = region->begin; alt < region->end; alt++) { + u32 insn; + int i; + if (!cpus_have_cap(alt->cpufeature)) continue; - BUG_ON(alt->alt_len > alt->orig_len); + BUG_ON(alt->alt_len != alt->orig_len); pr_info_once("patching kernel code\n"); origptr = (u8 *)&alt->orig_offset + alt->orig_offset; replptr = (u8 *)&alt->alt_offset + alt->alt_offset; - memcpy(origptr, replptr, alt->alt_len); + + for (i = 0; i < alt->alt_len; i += sizeof(insn)) { + insn = get_alt_insn(origptr + i, replptr + i); + aarch64_insn_write(origptr + i, insn); + } + flush_icache_range((uintptr_t)origptr, (uintptr_t)(origptr + alt->alt_len)); } diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index f7fa65d4c352..da675cc5dfae 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -24,7 +24,6 @@ #include <linux/kvm_host.h> #include <asm/thread_info.h> #include <asm/memory.h> -#include <asm/cputable.h> #include <asm/smp_plat.h> #include <asm/suspend.h> #include <asm/vdso_datapage.h> @@ -38,7 +37,6 @@ int main(void) DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); BLANK(); DEFINE(THREAD_CPU_CONTEXT, offsetof(struct task_struct, thread.cpu_context)); @@ -71,9 +69,6 @@ int main(void) BLANK(); DEFINE(PAGE_SZ, PAGE_SIZE); BLANK(); - DEFINE(CPU_INFO_SZ, sizeof(struct cpu_info)); - DEFINE(CPU_INFO_SETUP, offsetof(struct cpu_info, cpu_setup)); - BLANK(); DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index fa62637e63a8..6ffd91438560 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -16,8 +16,6 @@ * along with this program. If not, see <http://www.gnu.org/licenses/>. */ -#define pr_fmt(fmt) "alternatives: " fmt - #include <linux/types.h> #include <asm/cpu.h> #include <asm/cputype.h> @@ -26,27 +24,11 @@ #define MIDR_CORTEX_A53 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A53) #define MIDR_CORTEX_A57 MIDR_CPU_PART(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A57) -/* - * Add a struct or another datatype to the union below if you need - * different means to detect an affected CPU. 
- */ -struct arm64_cpu_capabilities { - const char *desc; - u16 capability; - bool (*is_affected)(struct arm64_cpu_capabilities *); - union { - struct { - u32 midr_model; - u32 midr_range_min, midr_range_max; - }; - }; -}; - #define CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ MIDR_ARCHITECTURE_MASK) static bool __maybe_unused -is_affected_midr_range(struct arm64_cpu_capabilities *entry) +is_affected_midr_range(const struct arm64_cpu_capabilities *entry) { u32 midr = read_cpuid_id(); @@ -59,12 +41,12 @@ is_affected_midr_range(struct arm64_cpu_capabilities *entry) } #define MIDR_RANGE(model, min, max) \ - .is_affected = is_affected_midr_range, \ + .matches = is_affected_midr_range, \ .midr_model = model, \ .midr_range_min = min, \ .midr_range_max = max -struct arm64_cpu_capabilities arm64_errata[] = { +const struct arm64_cpu_capabilities arm64_errata[] = { #if defined(CONFIG_ARM64_ERRATUM_826319) || \ defined(CONFIG_ARM64_ERRATUM_827319) || \ defined(CONFIG_ARM64_ERRATUM_824069) @@ -88,7 +70,16 @@ struct arm64_cpu_capabilities arm64_errata[] = { /* Cortex-A57 r0p0 - r1p2 */ .desc = "ARM erratum 832075", .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE, - MIDR_RANGE(MIDR_CORTEX_A57, 0x00, 0x12), + MIDR_RANGE(MIDR_CORTEX_A57, 0x00, + (1 << MIDR_VARIANT_SHIFT) | 2), + }, +#endif +#ifdef CONFIG_ARM64_ERRATUM_845719 + { + /* Cortex-A53 r0p[01234] */ + .desc = "ARM erratum 845719", + .capability = ARM64_WORKAROUND_845719, + MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04), }, #endif { @@ -97,15 +88,5 @@ struct arm64_cpu_capabilities arm64_errata[] = { void check_local_cpu_errata(void) { - struct arm64_cpu_capabilities *cpus = arm64_errata; - int i; - - for (i = 0; cpus[i].desc; i++) { - if (!cpus[i].is_affected(&cpus[i])) - continue; - - if (!cpus_have_cap(cpus[i].capability)) - pr_info("enabling workaround for %s\n", cpus[i].desc); - cpus_set_cap(cpus[i].capability); - } + check_cpu_capabilities(arm64_errata, "enabling workaround for"); } diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c new file mode 100644 index 000000000000..3d9967e43d89 --- /dev/null +++ b/arch/arm64/kernel/cpufeature.c @@ -0,0 +1,47 @@ +/* + * Contains CPU feature definitions + * + * Copyright (C) 2015 ARM Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define pr_fmt(fmt) "alternatives: " fmt + +#include <linux/types.h> +#include <asm/cpu.h> +#include <asm/cpufeature.h> + +static const struct arm64_cpu_capabilities arm64_features[] = { + {}, +}; + +void check_cpu_capabilities(const struct arm64_cpu_capabilities *caps, + const char *info) +{ + int i; + + for (i = 0; caps[i].desc; i++) { + if (!caps[i].matches(&caps[i])) + continue; + + if (!cpus_have_cap(caps[i].capability)) + pr_info("%s %s\n", info, caps[i].desc); + cpus_set_cap(caps[i].capability); + } +} + +void check_local_cpu_features(void) +{ + check_cpu_capabilities(arm64_features, "detected feature"); +} diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 5c0896647fd1..a78143a5c99f 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -15,7 +15,7 @@ #include <asm/cpuidle.h> #include <asm/cpu_ops.h> -int cpu_init_idle(unsigned int cpu) +int arm_cpuidle_init(unsigned int cpu) { int ret = -EOPNOTSUPP; struct device_node *cpu_node = of_cpu_device_node_get(cpu); diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 929855691dae..75d5a867e7fb 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -236,6 +236,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) cpuinfo_detect_icache_policy(info); check_local_cpu_errata(); + check_local_cpu_features(); update_cpu_features(info); } diff --git a/arch/arm64/kernel/cputable.c b/arch/arm64/kernel/cputable.c deleted file mode 100644 index fd3993cb060f..000000000000 --- a/arch/arm64/kernel/cputable.c +++ /dev/null @@ -1,33 +0,0 @@ -/* - * arch/arm64/kernel/cputable.c - * - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see <http://www.gnu.org/licenses/>. - */ - -#include <linux/init.h> - -#include <asm/cputable.h> - -extern unsigned long __cpu_setup(void); - -struct cpu_info cpu_table[] = { - { - .cpu_id_val = 0x000f0000, - .cpu_id_mask = 0x000f0000, - .cpu_name = "AArch64 Processor", - .cpu_setup = __cpu_setup, - }, - { /* Empty */ }, -}; diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index b42c7b480e1e..ab21e0d58278 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c @@ -337,7 +337,11 @@ core_initcall(arm64_dmi_init); static void efi_set_pgd(struct mm_struct *mm) { - cpu_switch_mm(mm->pgd, mm); + if (mm == &init_mm) + cpu_set_reserved_ttbr0(); + else + cpu_switch_mm(mm->pgd, mm); + flush_tlb_all(); if (icache_is_aivivt()) __flush_icache_all(); @@ -354,3 +358,12 @@ void efi_virtmap_unload(void) efi_set_pgd(current->active_mm); preempt_enable(); } + +/* + * UpdateCapsule() depends on the system being shutdown via + * ResetSystem(). 
+ */ +bool efi_poweroff_required(void) +{ + return efi_enabled(EFI_RUNTIME_SERVICES); +} diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index cf21bb3bf752..959fe8733560 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -21,8 +21,10 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <asm/alternative-asm.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> +#include <asm/cpufeature.h> #include <asm/errno.h> #include <asm/esr.h> #include <asm/thread_info.h> @@ -120,6 +122,24 @@ ct_user_enter ldr x23, [sp, #S_SP] // load return stack pointer msr sp_el0, x23 + +#ifdef CONFIG_ARM64_ERRATUM_845719 + alternative_insn \ + "nop", \ + "tbz x22, #4, 1f", \ + ARM64_WORKAROUND_845719 +#ifdef CONFIG_PID_IN_CONTEXTIDR + alternative_insn \ + "nop; nop", \ + "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:", \ + ARM64_WORKAROUND_845719 +#else + alternative_insn \ + "nop", \ + "msr contextidr_el1, xzr; 1:", \ + ARM64_WORKAROUND_845719 +#endif +#endif .endif msr elr_el1, x21 // set up the return data msr spsr_el1, x22 diff --git a/arch/arm64/kernel/entry32.S b/arch/arm64/kernel/entry32.S index 9a8f6ae2530e..bd9bfaa9269b 100644 --- a/arch/arm64/kernel/entry32.S +++ b/arch/arm64/kernel/entry32.S @@ -19,9 +19,12 @@ */ #include <linux/linkage.h> +#include <linux/const.h> #include <asm/assembler.h> #include <asm/asm-offsets.h> +#include <asm/errno.h> +#include <asm/page.h> /* * System call wrappers for the AArch32 compatibility layer. @@ -54,6 +57,21 @@ ENTRY(compat_sys_fstatfs64_wrapper) ENDPROC(compat_sys_fstatfs64_wrapper) /* + * Note: off_4k (w5) is always in units of 4K. If we can't do the + * requested offset because it is not page-aligned, we return -EINVAL. + */ +ENTRY(compat_sys_mmap2_wrapper) +#if PAGE_SHIFT > 12 + tst w5, #~PAGE_MASK >> 12 + b.ne 1f + lsr w5, w5, #PAGE_SHIFT - 12 +#endif + b sys_mmap_pgoff +1: mov x0, #-EINVAL + ret +ENDPROC(compat_sys_mmap2_wrapper) + +/* * Wrappers for AArch32 syscalls that either take 64-bit parameters * in registers or that take 32-bit parameters which require sign * extension. 
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index cf8556ae09d0..c851be795080 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable) branch = aarch64_insn_gen_branch_imm(pc, (unsigned long)ftrace_graph_caller, - AARCH64_INSN_BRANCH_LINK); + AARCH64_INSN_BRANCH_NOLINK); nop = aarch64_insn_gen_nop(); if (enable) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 8ce88e08c030..19f915e8f6e0 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -36,7 +36,7 @@ #include <asm/page.h> #include <asm/virt.h> -#define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) +#define __PHYS_OFFSET (KERNEL_START - TEXT_OFFSET) #if (TEXT_OFFSET & 0xfff) != 0 #error TEXT_OFFSET must be at least 4KB aligned @@ -46,13 +46,6 @@ #error TEXT_OFFSET must be less than 2MB #endif - .macro pgtbl, ttb0, ttb1, virt_to_phys - ldr \ttb1, =swapper_pg_dir - ldr \ttb0, =idmap_pg_dir - add \ttb1, \ttb1, \virt_to_phys - add \ttb0, \ttb0, \virt_to_phys - .endm - #ifdef CONFIG_ARM64_64K_PAGES #define BLOCK_SHIFT PAGE_SHIFT #define BLOCK_SIZE PAGE_SIZE @@ -63,7 +56,7 @@ #define TABLE_SHIFT PUD_SHIFT #endif -#define KERNEL_START KERNEL_RAM_VADDR +#define KERNEL_START _text #define KERNEL_END _end /* @@ -240,40 +233,43 @@ section_table: #endif ENTRY(stext) - mov x21, x0 // x21=FDT + bl preserve_boot_args bl el2_setup // Drop to EL1, w20=cpu_boot_mode - bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET + adrp x24, __PHYS_OFFSET bl set_cpu_boot_mode_flag - mrs x22, midr_el1 // x22=cpuid - mov x0, x22 - bl lookup_processor_type - mov x23, x0 // x23=current cpu_table - /* - * __error_p may end up out of range for cbz if text areas are - * aligned up to section sizes. - */ - cbnz x23, 1f // invalid processor (x23=0)? - b __error_p -1: + bl __vet_fdt bl __create_page_tables // x25=TTBR0, x26=TTBR1 /* - * The following calls CPU specific code in a position independent - * manner. See arch/arm64/mm/proc.S for details. x23 = base of - * cpu_info structure selected by lookup_processor_type above. + * The following calls CPU setup code, see arch/arm64/mm/proc.S for + * details. * On return, the CPU will be ready for the MMU to be turned on and * the TCR will have been set. */ - ldr x27, __switch_data // address to jump to after + ldr x27, =__mmap_switched // address to jump to after // MMU has been enabled - adrp lr, __enable_mmu // return (PIC) address - add lr, lr, #:lo12:__enable_mmu - ldr x12, [x23, #CPU_INFO_SETUP] - add x12, x12, x28 // __virt_to_phys - br x12 // initialise processor + adr_l lr, __enable_mmu // return (PIC) address + b __cpu_setup // initialise processor ENDPROC(stext) /* + * Preserve the arguments passed by the bootloader in x0 .. x3 + */ +preserve_boot_args: + mov x21, x0 // x21=FDT + + adr_l x0, boot_args // record the contents of + stp x21, x1, [x0] // x0 .. x3 at kernel entry + stp x2, x3, [x0, #16] + + dmb sy // needed before dc ivac with + // MMU off + + add x1, x0, #0x20 // 4 x 8 bytes + b __inval_cache_range // tail call +ENDPROC(preserve_boot_args) + +/* * Determine validity of the x21 FDT pointer. * The dtb must be 8-byte aligned and live in the first 512M of memory. 
*/ @@ -356,7 +352,8 @@ ENDPROC(__vet_fdt) * - pgd entry for fixed mappings (TTBR1) */ __create_page_tables: - pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses + adrp x25, idmap_pg_dir + adrp x26, swapper_pg_dir mov x27, lr /* @@ -385,12 +382,50 @@ __create_page_tables: * Create the identity mapping. */ mov x0, x25 // idmap_pg_dir - ldr x3, =KERNEL_START - add x3, x3, x28 // __pa(KERNEL_START) + adrp x3, KERNEL_START // __pa(KERNEL_START) + +#ifndef CONFIG_ARM64_VA_BITS_48 +#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) +#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT)) + + /* + * If VA_BITS < 48, it may be too small to allow for an ID mapping to be + * created that covers system RAM if that is located sufficiently high + * in the physical address space. So for the ID map, use an extended + * virtual range in that case, by configuring an additional translation + * level. + * First, we have to verify our assumption that the current value of + * VA_BITS was chosen such that all translation levels are fully + * utilised, and that lowering T0SZ will always result in an additional + * translation level to be configured. + */ +#if VA_BITS != EXTRA_SHIFT +#error "Mismatch between VA_BITS and page size/number of translation levels" +#endif + + /* + * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the + * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used), + * this number conveniently equals the number of leading zeroes in + * the physical address of KERNEL_END. + */ + adrp x5, KERNEL_END + clz x5, x5 + cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? + b.ge 1f // .. then skip additional level + + adr_l x6, idmap_t0sz + str x5, [x6] + dmb sy + dc ivac, x6 // Invalidate potentially stale cache line + + create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6 +1: +#endif + create_pgd_entry x0, x3, x5, x6 - ldr x6, =KERNEL_END mov x5, x3 // __pa(KERNEL_START) - add x6, x6, x28 // __pa(KERNEL_END) + adr_l x6, KERNEL_END // __pa(KERNEL_END) create_block_map x0, x7, x3, x5, x6 /* @@ -399,7 +434,7 @@ __create_page_tables: mov x0, x26 // swapper_pg_dir mov x5, #PAGE_OFFSET create_pgd_entry x0, x5, x3, x6 - ldr x6, =KERNEL_END + ldr x6, =KERNEL_END // __va(KERNEL_END) mov x3, x24 // phys offset create_block_map x0, x7, x3, x5, x6 @@ -426,6 +461,7 @@ __create_page_tables: */ mov x0, x25 add x1, x26, #SWAPPER_DIR_SIZE + dmb sy bl __inval_cache_range mov lr, x27 @@ -433,37 +469,22 @@ __create_page_tables: ENDPROC(__create_page_tables) .ltorg - .align 3 - .type __switch_data, %object -__switch_data: - .quad __mmap_switched - .quad __bss_start // x6 - .quad __bss_stop // x7 - .quad processor_id // x4 - .quad __fdt_pointer // x5 - .quad memstart_addr // x6 - .quad init_thread_union + THREAD_START_SP // sp - /* - * The following fragment of code is executed with the MMU on in MMU mode, and - * uses absolute addresses; this is not position independent. + * The following fragment of code is executed with the MMU enabled. 
*/ + .set initial_sp, init_thread_union + THREAD_START_SP __mmap_switched: - adr x3, __switch_data + 8 + adr_l x6, __bss_start + adr_l x7, __bss_stop - ldp x6, x7, [x3], #16 1: cmp x6, x7 b.hs 2f str xzr, [x6], #8 // Clear BSS b 1b 2: - ldp x4, x5, [x3], #16 - ldr x6, [x3], #8 - ldr x16, [x3] - mov sp, x16 - str x22, [x4] // Save processor ID - str x21, [x5] // Save FDT pointer - str x24, [x6] // Save PHYS_OFFSET + adr_l sp, initial_sp, x4 + str_l x21, __fdt_pointer, x5 // Save FDT pointer + str_l x24, memstart_addr, x6 // Save PHYS_OFFSET mov x29, #0 b start_kernel ENDPROC(__mmap_switched) @@ -566,8 +587,7 @@ ENDPROC(el2_setup) * in x20. See arch/arm64/include/asm/virt.h for more info. */ ENTRY(set_cpu_boot_mode_flag) - ldr x1, =__boot_cpu_mode // Compute __boot_cpu_mode - add x1, x1, x28 + adr_l x1, __boot_cpu_mode cmp w20, #BOOT_CPU_MODE_EL2 b.ne 1f add x1, x1, #4 @@ -585,32 +605,24 @@ ENDPROC(set_cpu_boot_mode_flag) * zeroing of .bss would clobber it. */ .pushsection .data..cacheline_aligned -ENTRY(__boot_cpu_mode) .align L1_CACHE_SHIFT +ENTRY(__boot_cpu_mode) .long BOOT_CPU_MODE_EL2 - .long 0 + .long BOOT_CPU_MODE_EL1 .popsection #ifdef CONFIG_SMP - .align 3 -1: .quad . - .quad secondary_holding_pen_release - /* * This provides a "holding pen" for platforms to hold all secondary * cores are held until we're ready for them to initialise. */ ENTRY(secondary_holding_pen) bl el2_setup // Drop to EL1, w20=cpu_boot_mode - bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET bl set_cpu_boot_mode_flag mrs x0, mpidr_el1 ldr x1, =MPIDR_HWID_BITMASK and x0, x0, x1 - adr x1, 1b - ldp x2, x3, [x1] - sub x1, x1, x2 - add x3, x3, x1 + adr_l x3, secondary_holding_pen_release pen: ldr x4, [x3] cmp x4, x0 b.eq secondary_startup @@ -624,7 +636,6 @@ ENDPROC(secondary_holding_pen) */ ENTRY(secondary_entry) bl el2_setup // Drop to EL1 - bl __calc_phys_offset // x24=PHYS_OFFSET, x28=PHYS_OFFSET-PAGE_OFFSET bl set_cpu_boot_mode_flag b secondary_startup ENDPROC(secondary_entry) @@ -633,16 +644,9 @@ ENTRY(secondary_startup) /* * Common entry point for secondary CPUs. */ - mrs x22, midr_el1 // x22=cpuid - mov x0, x22 - bl lookup_processor_type - mov x23, x0 // x23=current cpu_table - cbz x23, __error_p // invalid processor (x23=0)? - - pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1 - ldr x12, [x23, #CPU_INFO_SETUP] - add x12, x12, x28 // __virt_to_phys - blr x12 // initialise processor + adrp x25, idmap_pg_dir + adrp x26, swapper_pg_dir + bl __cpu_setup // initialise processor ldr x21, =secondary_data ldr x27, =__secondary_switched // address to jump to after enabling the MMU @@ -658,11 +662,12 @@ ENDPROC(__secondary_switched) #endif /* CONFIG_SMP */ /* - * Setup common bits before finally enabling the MMU. Essentially this is just - * loading the page table pointer and vector base registers. + * Enable the MMU. + * + * x0 = SCTLR_EL1 value for turning on the MMU. + * x27 = *virtual* address to jump to upon completion * - * On entry to this code, x0 must contain the SCTLR_EL1 value for turning on - * the MMU. + * other registers depend on the function called upon completion */ __enable_mmu: ldr x5, =vectors @@ -670,89 +675,7 @@ __enable_mmu: msr ttbr0_el1, x25 // load TTBR0 msr ttbr1_el1, x26 // load TTBR1 isb - b __turn_mmu_on -ENDPROC(__enable_mmu) - -/* - * Enable the MMU. This completely changes the structure of the visible memory - * space. You will not be able to trace execution through this. 
- * - * x0 = system control register - * x27 = *virtual* address to jump to upon completion - * - * other registers depend on the function called upon completion - * - * We align the entire function to the smallest power of two larger than it to - * ensure it fits within a single block map entry. Otherwise were PHYS_OFFSET - * close to the end of a 512MB or 1GB block we might require an additional - * table to map the entire function. - */ - .align 4 -__turn_mmu_on: msr sctlr_el1, x0 isb br x27 -ENDPROC(__turn_mmu_on) - -/* - * Calculate the start of physical memory. - */ -__calc_phys_offset: - adr x0, 1f - ldp x1, x2, [x0] - sub x28, x0, x1 // x28 = PHYS_OFFSET - PAGE_OFFSET - add x24, x2, x28 // x24 = PHYS_OFFSET - ret -ENDPROC(__calc_phys_offset) - - .align 3 -1: .quad . - .quad PAGE_OFFSET - -/* - * Exception handling. Something went wrong and we can't proceed. We ought to - * tell the user, but since we don't have any guarantee that we're even - * running on the right architecture, we do virtually nothing. - */ -__error_p: -ENDPROC(__error_p) - -__error: -1: nop - b 1b -ENDPROC(__error) - -/* - * This function gets the processor ID in w0 and searches the cpu_table[] for - * a match. It returns a pointer to the struct cpu_info it found. The - * cpu_table[] must end with an empty (all zeros) structure. - * - * This routine can be called via C code and it needs to work with the MMU - * both disabled and enabled (the offset is calculated automatically). - */ -ENTRY(lookup_processor_type) - adr x1, __lookup_processor_type_data - ldp x2, x3, [x1] - sub x1, x1, x2 // get offset between VA and PA - add x3, x3, x1 // convert VA to PA -1: - ldp w5, w6, [x3] // load cpu_id_val and cpu_id_mask - cbz w5, 2f // end of list? - and w6, w6, w0 - cmp w5, w6 - b.eq 3f - add x3, x3, #CPU_INFO_SZ - b 1b -2: - mov x3, #0 // unknown processor -3: - mov x0, x3 - ret -ENDPROC(lookup_processor_type) - - .align 3 - .type __lookup_processor_type_data, %object -__lookup_processor_type_data: - .quad . - .quad cpu_table - .size __lookup_processor_type_data, . - __lookup_processor_type_data +ENDPROC(__enable_mmu) diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c index 98bbe06e469c..e7d934d3afe0 100644 --- a/arch/arm64/kernel/hw_breakpoint.c +++ b/arch/arm64/kernel/hw_breakpoint.c @@ -527,7 +527,7 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp) * Disallow per-task kernel breakpoints since these would * complicate the stepping code. 
*/ - if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.bp_target) + if (info->ctrl.privilege == AARCH64_BREAKPOINT_EL1 && bp->hw.target) return -EINVAL; return 0; diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 27d4864577e5..924902083e47 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap) if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) page = vmalloc_to_page(addr); - else + else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) page = virt_to_page(addr); + else + return addr; BUG_ON(!page); set_fixmap(fixmap, page_to_phys(page)); @@ -263,23 +265,13 @@ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt) return aarch64_insn_patch_text_sync(addrs, insns, cnt); } -u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, - u32 insn, u64 imm) +static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type, + u32 *maskp, int *shiftp) { - u32 immlo, immhi, lomask, himask, mask; + u32 mask; int shift; switch (type) { - case AARCH64_INSN_IMM_ADR: - lomask = 0x3; - himask = 0x7ffff; - immlo = imm & lomask; - imm >>= 2; - immhi = imm & himask; - imm = (immlo << 24) | (immhi); - mask = (lomask << 24) | (himask); - shift = 5; - break; case AARCH64_INSN_IMM_26: mask = BIT(26) - 1; shift = 0; @@ -318,9 +310,68 @@ u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, shift = 16; break; default: - pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", - type); - return 0; + return -EINVAL; + } + + *maskp = mask; + *shiftp = shift; + + return 0; +} + +#define ADR_IMM_HILOSPLIT 2 +#define ADR_IMM_SIZE SZ_2M +#define ADR_IMM_LOMASK ((1 << ADR_IMM_HILOSPLIT) - 1) +#define ADR_IMM_HIMASK ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1) +#define ADR_IMM_LOSHIFT 29 +#define ADR_IMM_HISHIFT 5 + +u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn) +{ + u32 immlo, immhi, mask; + int shift; + + switch (type) { + case AARCH64_INSN_IMM_ADR: + shift = 0; + immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK; + immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK; + insn = (immhi << ADR_IMM_HILOSPLIT) | immlo; + mask = ADR_IMM_SIZE - 1; + break; + default: + if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { + pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n", + type); + return 0; + } + } + + return (insn >> shift) & mask; +} + +u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type, + u32 insn, u64 imm) +{ + u32 immlo, immhi, mask; + int shift; + + switch (type) { + case AARCH64_INSN_IMM_ADR: + shift = 0; + immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT; + imm >>= ADR_IMM_HILOSPLIT; + immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT; + imm = immlo | immhi; + mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) | + (ADR_IMM_HIMASK << ADR_IMM_HISHIFT)); + break; + default: + if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) { + pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n", + type); + return 0; + } } /* Update the immediate field. 
*/ diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 25a5308744b1..195991dadc37 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -25,8 +25,10 @@ #include <linux/irq.h> #include <linux/kernel.h> #include <linux/export.h> +#include <linux/of.h> #include <linux/perf_event.h> #include <linux/platform_device.h> +#include <linux/slab.h> #include <linux/spinlock.h> #include <linux/uaccess.h> @@ -322,22 +324,31 @@ out: } static int -validate_event(struct pmu_hw_events *hw_events, - struct perf_event *event) +validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events, + struct perf_event *event) { - struct arm_pmu *armpmu = to_arm_pmu(event->pmu); + struct arm_pmu *armpmu; struct hw_perf_event fake_event = event->hw; struct pmu *leader_pmu = event->group_leader->pmu; if (is_software_event(event)) return 1; + /* + * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The + * core perf code won't check that the pmu->ctx == leader->ctx + * until after pmu->event_init(event). + */ + if (event->pmu != pmu) + return 0; + if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF) return 1; if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) return 1; + armpmu = to_arm_pmu(event->pmu); return armpmu->get_event_idx(hw_events, &fake_event) >= 0; } @@ -355,15 +366,15 @@ validate_group(struct perf_event *event) memset(fake_used_mask, 0, sizeof(fake_used_mask)); fake_pmu.used_mask = fake_used_mask; - if (!validate_event(&fake_pmu, leader)) + if (!validate_event(event->pmu, &fake_pmu, leader)) return -EINVAL; list_for_each_entry(sibling, &leader->sibling_list, group_entry) { - if (!validate_event(&fake_pmu, sibling)) + if (!validate_event(event->pmu, &fake_pmu, sibling)) return -EINVAL; } - if (!validate_event(&fake_pmu, event)) + if (!validate_event(event->pmu, &fake_pmu, event)) return -EINVAL; return 0; @@ -396,7 +407,12 @@ armpmu_release_hardware(struct arm_pmu *armpmu) free_percpu_irq(irq, &cpu_hw_events); } else { for (i = 0; i < irqs; ++i) { - if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) + int cpu = i; + + if (armpmu->irq_affinity) + cpu = armpmu->irq_affinity[i]; + + if (!cpumask_test_and_clear_cpu(cpu, &armpmu->active_irqs)) continue; irq = platform_get_irq(pmu_device, i); if (irq > 0) @@ -450,19 +466,24 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) on_each_cpu(armpmu_enable_percpu_irq, &irq, 1); } else { for (i = 0; i < irqs; ++i) { + int cpu = i; + err = 0; irq = platform_get_irq(pmu_device, i); if (irq <= 0) continue; + if (armpmu->irq_affinity) + cpu = armpmu->irq_affinity[i]; + /* * If we have a single PMU interrupt that we can't shift, * assume that we're running on a uniprocessor machine and * continue. Otherwise, continue without this interrupt. 
*/ - if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { + if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) { pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", - irq, i); + irq, cpu); continue; } @@ -476,7 +497,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) return err; } - cpumask_set_cpu(i, &armpmu->active_irqs); + cpumask_set_cpu(cpu, &armpmu->active_irqs); } } @@ -1289,9 +1310,46 @@ static const struct of_device_id armpmu_of_device_ids[] = { static int armpmu_device_probe(struct platform_device *pdev) { + int i, *irqs; + if (!cpu_pmu) return -ENODEV; + irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL); + if (!irqs) + return -ENOMEM; + + for (i = 0; i < pdev->num_resources; ++i) { + struct device_node *dn; + int cpu; + + dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity", + i); + if (!dn) { + pr_warn("Failed to parse %s/interrupt-affinity[%d]\n", + of_node_full_name(dn), i); + break; + } + + for_each_possible_cpu(cpu) + if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL)) + break; + + of_node_put(dn); + if (cpu >= nr_cpu_ids) { + pr_warn("Failed to find logical CPU for %s\n", + dn->name); + break; + } + + irqs[i] = cpu; + } + + if (i == pdev->num_resources) + cpu_pmu->irq_affinity = irqs; + else + kfree(irqs); + cpu_pmu->plat_device = pdev; return 0; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index fde9923af859..c6b1f3b96f45 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -21,6 +21,7 @@ #include <stdarg.h> #include <linux/compat.h> +#include <linux/efi.h> #include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> @@ -150,6 +151,13 @@ void machine_restart(char *cmd) local_irq_disable(); smp_send_stop(); + /* + * UpdateCapsule() depends on the system being reset via + * ResetSystem(). + */ + if (efi_enabled(EFI_RUNTIME_SERVICES)) + efi_reboot(reboot_mode, NULL); + /* Now call the architecture specific reboot code. */ if (arm_pm_restart) arm_pm_restart(reboot_mode, cmd); diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S new file mode 100644 index 000000000000..cf83e61cd3b5 --- /dev/null +++ b/arch/arm64/kernel/psci-call.S @@ -0,0 +1,28 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * Copyright (C) 2015 ARM Limited + * + * Author: Will Deacon <will.deacon@arm.com> + */ + +#include <linux/linkage.h> + +/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ +ENTRY(__invoke_psci_fn_hvc) + hvc #0 + ret +ENDPROC(__invoke_psci_fn_hvc) + +/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ +ENTRY(__invoke_psci_fn_smc) + smc #0 + ret +ENDPROC(__invoke_psci_fn_smc) diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 3425f311c49e..9b8a70ae64a1 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -57,6 +57,9 @@ static struct psci_operations psci_ops; static int (*invoke_psci_fn)(u64, u64, u64, u64); typedef int (*psci_initcall_t)(const struct device_node *); +asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64); +asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); + enum psci_function { PSCI_FN_CPU_SUSPEND, PSCI_FN_CPU_ON, @@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state, PSCI_0_2_POWER_STATE_AFFL_SHIFT; } -/* - * The following two functions are invoked via the invoke_psci_fn pointer - * and will not be inlined, allowing us to piggyback on the AAPCS. - */ -static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, - u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "hvc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - -static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, - u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "smc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - static int psci_get_version(void) { int err; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index e8420f635bd4..51ef97274b52 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -50,7 +50,6 @@ #include <asm/cpu.h> #include <asm/cputype.h> #include <asm/elf.h> -#include <asm/cputable.h> #include <asm/cpufeature.h> #include <asm/cpu_ops.h> #include <asm/sections.h> @@ -62,9 +61,7 @@ #include <asm/memblock.h> #include <asm/psci.h> #include <asm/efi.h> - -unsigned int processor_id; -EXPORT_SYMBOL(processor_id); +#include <asm/virt.h> unsigned long elf_hwcap __read_mostly; EXPORT_SYMBOL_GPL(elf_hwcap); @@ -83,7 +80,6 @@ unsigned int compat_elf_hwcap2 __read_mostly; DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); -static const char *cpu_name; phys_addr_t __fdt_pointer __initdata; /* @@ -119,6 +115,11 @@ void __init early_print(const char *str, ...) printk("%s", buf); } +/* + * The recorded values of x0 .. x3 upon kernel entry. 
+ */ +u64 __cacheline_aligned boot_args[4]; + void __init smp_setup_processor_id(void) { u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK; @@ -207,24 +208,38 @@ static void __init smp_build_mpidr_hash(void) } #endif +static void __init hyp_mode_check(void) +{ + if (is_hyp_mode_available()) + pr_info("CPU: All CPU(s) started at EL2\n"); + else if (is_hyp_mode_mismatched()) + WARN_TAINT(1, TAINT_CPU_OUT_OF_SPEC, + "CPU: CPUs started in inconsistent modes"); + else + pr_info("CPU: All CPU(s) started at EL1\n"); +} + +void __init do_post_cpus_up_work(void) +{ + hyp_mode_check(); + apply_alternatives_all(); +} + +#ifdef CONFIG_UP_LATE_INIT +void __init up_late_init(void) +{ + do_post_cpus_up_work(); +} +#endif /* CONFIG_UP_LATE_INIT */ + static void __init setup_processor(void) { - struct cpu_info *cpu_info; u64 features, block; u32 cwg; int cls; - cpu_info = lookup_processor_type(read_cpuid_id()); - if (!cpu_info) { - printk("CPU configuration botched (ID %08x), unable to continue.\n", - read_cpuid_id()); - while (1); - } - - cpu_name = cpu_info->cpu_name; - - printk("CPU: %s [%08x] revision %d\n", - cpu_name, read_cpuid_id(), read_cpuid_id() & 15); + printk("CPU: AArch64 Processor [%08x] revision %d\n", + read_cpuid_id(), read_cpuid_id() & 15); sprintf(init_utsname()->machine, ELF_PLATFORM); elf_hwcap = 0; @@ -402,6 +417,12 @@ void __init setup_arch(char **cmdline_p) conswitchp = &dummy_con; #endif #endif + if (boot_args[1] || boot_args[2] || boot_args[3]) { + pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n" + "\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n" + "This indicates a broken bootloader or old kernel\n", + boot_args[1], boot_args[2], boot_args[3]); + } } static int __init arm64_device_init(void) diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 660ccf9f7524..e18c48cb6db1 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -287,19 +287,12 @@ static void setup_restart_syscall(struct pt_regs *regs) */ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { - struct thread_info *thread = current_thread_info(); struct task_struct *tsk = current; sigset_t *oldset = sigmask_to_save(); int usig = ksig->sig; int ret; /* - * translate the signal - */ - if (usig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap) - usig = thread->exec_domain->signal_invmap[usig]; - - /* * Set up the stack frame */ if (is_compat_task()) { diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index c20a300e2213..d26fcd4cd6e6 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) case __SI_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, - &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_POLL: err |= __put_user(from->si_band, &to->si_band); @@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) case __SI_MESGQ: /* But this is */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_SYS: err |= __put_user((compat_uptr_t)(unsigned long) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 
328b8ce4b007..ffe8e1b814e0 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -151,6 +151,7 @@ asmlinkage void secondary_start_kernel(void) */ cpu_set_reserved_ttbr0(); flush_tlb_all(); + cpu_set_default_tcr_t0sz(); preempt_disable(); trace_hardirqs_off(); @@ -309,7 +310,7 @@ void cpu_die(void) void __init smp_cpus_done(unsigned int max_cpus) { pr_info("SMP: Total of %d processors activated.\n", num_online_cpus()); - apply_alternatives_all(); + do_post_cpus_up_work(); } void __init smp_prepare_boot_cpu(void) diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c index 2d5ab3c90b82..a40b1343b819 100644 --- a/arch/arm64/kernel/sys32.c +++ b/arch/arm64/kernel/sys32.c @@ -37,6 +37,7 @@ asmlinkage long compat_sys_readahead_wrapper(void); asmlinkage long compat_sys_fadvise64_64_wrapper(void); asmlinkage long compat_sys_sync_file_range2_wrapper(void); asmlinkage long compat_sys_fallocate_wrapper(void); +asmlinkage long compat_sys_mmap2_wrapper(void); #undef __SYSCALL #define __SYSCALL(nr, sym) [nr] = sym, diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 32aeea083d93..ec37ab3f524f 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c @@ -200,7 +200,7 @@ up_fail: void update_vsyscall(struct timekeeper *tk) { struct timespec xtime_coarse; - u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter"); + u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); ++vdso_data->tb_seq_count; smp_wmb(); @@ -213,11 +213,11 @@ void update_vsyscall(struct timekeeper *tk) vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; if (!use_syscall) { - vdso_data->cs_cycle_last = tk->tkr.cycle_last; + vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last; vdso_data->xtime_clock_sec = tk->xtime_sec; - vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec; - vdso_data->cs_mult = tk->tkr.mult; - vdso_data->cs_shift = tk->tkr.shift; + vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec; + vdso_data->cs_mult = tk->tkr_mono.mult; + vdso_data->cs_shift = tk->tkr_mono.shift; } smp_wmb(); diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index fe652ffd34c2..efa79e8d4196 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime) /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ ENTRY(__kernel_clock_getres) .cfi_startproc - cbz w1, 3f - cmp w0, #CLOCK_REALTIME ccmp w0, #CLOCK_MONOTONIC, #0x4, ne b.ne 1f @@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: + cbz w1, 3f stp xzr, x2, [x1] 3: /* res == NULL. */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 5d9d2dca530d..a2c29865c3fe 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -23,10 +23,14 @@ jiffies = jiffies_64; #define HYPERVISOR_TEXT \ /* \ - * Force the alignment to be compatible with \ - * the vectors requirements \ + * Align to 4 KB so that \ + * a) the HYP vector table is at its minimum \ + * alignment of 2048 bytes \ + * b) the HYP init code will not cross a page \ + * boundary if its size does not exceed \ + * 4 KB (see related ASSERT() below) \ */ \ - . = ALIGN(2048); \ + . = ALIGN(SZ_4K); \ VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ *(.hyp.idmap.text) \ VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; \ @@ -163,10 +167,11 @@ SECTIONS } /* - * The HYP init code can't be more than a page long. 
+ * The HYP init code can't be more than a page long, + * and should not cross a page boundary. */ -ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), - "HYP init code too big") +ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & ~(SZ_4K - 1)) <= SZ_4K, + "HYP init code too big or misaligned") /* * If padding is applied before .head.text, virt<->phys conversions will fail. |