Diffstat (limited to 'arch/arm64/kernel')
26 files changed, 707 insertions, 460 deletions
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index eaa77ed7766a..bef04afd6031 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -16,10 +16,10 @@ arm64-obj-y	:= cputable.o debug-monitors.o entry.o irq.o fpsimd.o	\
 			entry-fpsimd.o process.o ptrace.o setup.o signal.o	\
 			sys.o stacktrace.o time.o traps.o io.o vdso.o		\
 			hyp-stub.o psci.o cpu_ops.o insn.o return_address.o	\
-			cpuinfo.o cpu_errata.o alternative.o
+			cpuinfo.o cpu_errata.o alternative.o cacheinfo.o
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o		\
-					   sys_compat.o				\
+					   sys_compat.o entry32.o		\
 					   ../../arm/kernel/opcodes.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
@@ -27,7 +27,7 @@ arm64-obj-$(CONFIG_SMP)			+= smp.o smp_spin_table.o topology.o
 arm64-obj-$(CONFIG_PERF_EVENTS)		+= perf_regs.o
 arm64-obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 arm64-obj-$(CONFIG_HAVE_HW_BREAKPOINT)	+= hw_breakpoint.o
-arm64-obj-$(CONFIG_ARM64_CPU_SUSPEND)	+= sleep.o suspend.o
+arm64-obj-$(CONFIG_CPU_PM)		+= sleep.o suspend.o
 arm64-obj-$(CONFIG_CPU_IDLE)		+= cpuidle.o
 arm64-obj-$(CONFIG_JUMP_LABEL)		+= jump_label.o
 arm64-obj-$(CONFIG_KGDB)		+= kgdb.o
diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c
index c363671d7509..7922c2e710ca 100644
--- a/arch/arm64/kernel/armv8_deprecated.c
+++ b/arch/arm64/kernel/armv8_deprecated.c
@@ -19,6 +19,7 @@
 #include <asm/system_misc.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
+#include <asm/cpufeature.h>
 
 #define CREATE_TRACE_POINTS
 #include "trace-events-emulation.h"
@@ -85,6 +86,57 @@ static void remove_emulation_hooks(struct insn_emulation_ops *ops)
 	pr_notice("Removed %s emulation handler\n", ops->name);
 }
 
+static void enable_insn_hw_mode(void *data)
+{
+	struct insn_emulation *insn = (struct insn_emulation *)data;
+
+	if (insn->ops->set_hw_mode)
+		insn->ops->set_hw_mode(true);
+}
+
+static void disable_insn_hw_mode(void *data)
+{
+	struct insn_emulation *insn = (struct insn_emulation *)data;
+
+	if (insn->ops->set_hw_mode)
+		insn->ops->set_hw_mode(false);
+}
+
+/* Run set_hw_mode(mode) on all active CPUs */
+static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable)
+{
+	if (!insn->ops->set_hw_mode)
+		return -EINVAL;
+	if (enable)
+		on_each_cpu(enable_insn_hw_mode, (void *)insn, true);
+	else
+		on_each_cpu(disable_insn_hw_mode, (void *)insn, true);
+	return 0;
+}
+
+/*
+ * Run set_hw_mode for all insns on a starting CPU.
+ * Returns:
+ *  0 		- If all the hooks ran successfully.
+ * -EINVAL	- At least one hook is not supported by the CPU.
+ */
+static int run_all_insn_set_hw_mode(unsigned long cpu)
+{
+	int rc = 0;
+	unsigned long flags;
+	struct insn_emulation *insn;
+
+	raw_spin_lock_irqsave(&insn_emulation_lock, flags);
+	list_for_each_entry(insn, &insn_emulation, node) {
+		bool enable = (insn->current_mode == INSN_HW);
+
+		if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) {
+			pr_warn("CPU[%ld] cannot support the emulation of %s",
+				cpu, insn->ops->name);
+			rc = -EINVAL;
+		}
+	}
+	raw_spin_unlock_irqrestore(&insn_emulation_lock, flags);
+	return rc;
+}
+
 static int update_insn_emulation_mode(struct insn_emulation *insn,
 				      enum insn_emulation_mode prev)
 {
@@ -97,10 +149,8 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
 		remove_emulation_hooks(insn->ops);
 		break;
 	case INSN_HW:
-		if (insn->ops->set_hw_mode) {
-			insn->ops->set_hw_mode(false);
+		if (!run_all_cpu_set_hw_mode(insn, false))
 			pr_notice("Disabled %s support\n", insn->ops->name);
-		}
 		break;
 	}
 
@@ -111,10 +161,9 @@ static int update_insn_emulation_mode(struct insn_emulation *insn,
 		register_emulation_hooks(insn->ops);
 		break;
 	case INSN_HW:
-		if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(true))
+		ret = run_all_cpu_set_hw_mode(insn, true);
+		if (!ret)
 			pr_notice("Enabled %s support\n", insn->ops->name);
-		else
-			ret = -EINVAL;
 		break;
 	}
 
@@ -133,6 +182,8 @@ static void register_insn_emulation(struct insn_emulation_ops *ops)
 	switch (ops->status) {
 	case INSN_DEPRECATED:
 		insn->current_mode = INSN_EMULATE;
+		/* Disable the HW mode if it was turned on at early boot time */
+		run_all_cpu_set_hw_mode(insn, false);
 		insn->max = INSN_HW;
 		break;
 	case INSN_OBSOLETE:
@@ -453,8 +504,6 @@ ret:
 	return 0;
 }
 
-#define SCTLR_EL1_CP15BEN	(1 << 5)
-
 static inline void config_sctlr_el1(u32 clear, u32 set)
 {
 	u32 val;
@@ -465,48 +514,13 @@ static inline void config_sctlr_el1(u32 clear, u32 set)
 	asm volatile("msr sctlr_el1, %0" : : "r" (val));
 }
 
-static void enable_cp15_ben(void *info)
-{
-	config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
-}
-
-static void disable_cp15_ben(void *info)
-{
-	config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
-}
-
-static int cpu_hotplug_notify(struct notifier_block *b,
-			      unsigned long action, void *hcpu)
-{
-	switch (action) {
-	case CPU_STARTING:
-	case CPU_STARTING_FROZEN:
-		enable_cp15_ben(NULL);
-		return NOTIFY_DONE;
-	case CPU_DYING:
-	case CPU_DYING_FROZEN:
-		disable_cp15_ben(NULL);
-		return NOTIFY_DONE;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_hotplug_notifier = {
-	.notifier_call = cpu_hotplug_notify,
-};
-
 static int cp15_barrier_set_hw_mode(bool enable)
 {
-	if (enable) {
-		register_cpu_notifier(&cpu_hotplug_notifier);
-		on_each_cpu(enable_cp15_ben, NULL, true);
-	} else {
-		unregister_cpu_notifier(&cpu_hotplug_notifier);
-		on_each_cpu(disable_cp15_ben, NULL, true);
-	}
-
-	return true;
+	if (enable)
+		config_sctlr_el1(0, SCTLR_EL1_CP15BEN);
+	else
+		config_sctlr_el1(SCTLR_EL1_CP15BEN, 0);
+	return 0;
 }
 
 static struct undef_hook cp15_barrier_hooks[] = {
@@ -534,6 +548,93 @@ static struct insn_emulation_ops cp15_barrier_ops = {
 	.set_hw_mode = cp15_barrier_set_hw_mode,
 };
 
+static int setend_set_hw_mode(bool enable)
+{
+	if (!cpu_supports_mixed_endian_el0())
+		return -EINVAL;
+
+	if (enable)
+		config_sctlr_el1(SCTLR_EL1_SED, 0);
+	else
+		config_sctlr_el1(0, SCTLR_EL1_SED);
+	return 0;
+}
+
+static int compat_setend_handler(struct pt_regs *regs, u32 big_endian)
+{
+	char *insn;
+
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
+
+	if (big_endian) {
+		insn = "setend be";
+		regs->pstate |= COMPAT_PSR_E_BIT;
+	} else {
+		insn = "setend le";
+		regs->pstate &= ~COMPAT_PSR_E_BIT;
+	}
+
+	trace_instruction_emulation(insn, regs->pc);
+	pr_warn_ratelimited("\"%s\" (%ld) uses deprecated setend instruction at 0x%llx\n",
+			    current->comm, (unsigned long)current->pid, regs->pc);
+
+	return 0;
+}
+
+static int a32_setend_handler(struct pt_regs *regs, u32 instr)
+{
+	int rc = compat_setend_handler(regs, (instr >> 9) & 1);
+
+	regs->pc += 4;
+	return rc;
+}
+
+static int t16_setend_handler(struct pt_regs *regs, u32 instr)
+{
+	int rc = compat_setend_handler(regs, (instr >> 3) & 1);
+
+	regs->pc += 2;
+	return rc;
+}
+
+static struct undef_hook setend_hooks[] = {
+	{
+		.instr_mask	= 0xfffffdff,
+		.instr_val	= 0xf1010000,
+		.pstate_mask	= COMPAT_PSR_MODE_MASK,
+		.pstate_val	= COMPAT_PSR_MODE_USR,
+		.fn		= a32_setend_handler,
+	},
+	{
+		/* Thumb mode */
+		.instr_mask	= 0x0000fff7,
+		.instr_val	= 0x0000b650,
+		.pstate_mask	= (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_MASK),
+		.pstate_val	= (COMPAT_PSR_T_BIT | COMPAT_PSR_MODE_USR),
+		.fn		= t16_setend_handler,
+	},
+	{}
+};
+
+static struct insn_emulation_ops setend_ops = {
+	.name = "setend",
+	.status = INSN_DEPRECATED,
+	.hooks = setend_hooks,
+	.set_hw_mode = setend_set_hw_mode,
+};
+
+static int insn_cpu_hotplug_notify(struct notifier_block *b,
+			      unsigned long action, void *hcpu)
+{
+	int rc = 0;
+
+	if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING)
+		rc = run_all_insn_set_hw_mode((unsigned long)hcpu);
+
+	return notifier_from_errno(rc);
+}
+
+static struct notifier_block insn_cpu_hotplug_notifier = {
+	.notifier_call = insn_cpu_hotplug_notify,
+};
+
 /*
  * Invoked as late_initcall, since not needed before init spawned.
  */
@@ -545,6 +646,14 @@ static int __init armv8_deprecated_init(void)
 	if (IS_ENABLED(CONFIG_CP15_BARRIER_EMULATION))
 		register_insn_emulation(&cp15_barrier_ops);
 
+	if (IS_ENABLED(CONFIG_SETEND_EMULATION)) {
+		if (system_supports_mixed_endian_el0())
+			register_insn_emulation(&setend_ops);
+		else
+			pr_info("setend instruction emulation is not supported on the system\n");
+	}
+
+	register_cpu_notifier(&insn_cpu_hotplug_notifier);
 	register_insn_emulation_sysctl(ctl_abi);
 
 	return 0;
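The setend hooks above pull the endianness bit straight out of the trapped instruction: the A32 encoding keeps E in bit 9 (which is exactly the bit left free by instr_mask 0xfffffdff), the T16 encoding in bit 3 (mask 0x0000fff7). A minimal standalone sketch of that decode, with hypothetical helper names; ordinary userspace C, not part of the patch:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	/* A32 SETEND: 0xf1010000 | E << 9 (0xf1010000 = LE, 0xf1010200 = BE) */
	static bool a32_setend_is_be(uint32_t instr)
	{
		return (instr >> 9) & 1;
	}

	/* T16 SETEND: 0xb650 | E << 3 (0xb650 = LE, 0xb658 = BE) */
	static bool t16_setend_is_be(uint16_t instr)
	{
		return (instr >> 3) & 1;
	}

	int main(void)
	{
		printf("0xf1010200 -> %s\n",
		       a32_setend_is_be(0xf1010200) ? "setend be" : "setend le");
		printf("0xb650     -> %s\n",
		       t16_setend_is_be(0xb650) ? "setend be" : "setend le");
		return 0;
	}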
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 9a9fce090d58..f7fa65d4c352 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -140,6 +140,7 @@ int main(void)
   DEFINE(VGIC_V2_CPU_ELRSR,	offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
   DEFINE(VGIC_V2_CPU_APR,	offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
   DEFINE(VGIC_V2_CPU_LR,	offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
+  DEFINE(VGIC_V3_CPU_SRE,	offsetof(struct vgic_cpu, vgic_v3.vgic_sre));
   DEFINE(VGIC_V3_CPU_HCR,	offsetof(struct vgic_cpu, vgic_v3.vgic_hcr));
   DEFINE(VGIC_V3_CPU_VMCR,	offsetof(struct vgic_cpu, vgic_v3.vgic_vmcr));
   DEFINE(VGIC_V3_CPU_MISR,	offsetof(struct vgic_cpu, vgic_v3.vgic_misr));
@@ -152,7 +153,7 @@ int main(void)
   DEFINE(KVM_VTTBR,		offsetof(struct kvm, arch.vttbr));
   DEFINE(KVM_VGIC_VCTRL,	offsetof(struct kvm, arch.vgic.vctrl_base));
 #endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
   DEFINE(CPU_SUSPEND_SZ,	sizeof(struct cpu_suspend_ctx));
   DEFINE(CPU_CTX_SP,		offsetof(struct cpu_suspend_ctx, sp));
   DEFINE(MPIDR_HASH_MASK,	offsetof(struct mpidr_hash, mask));
diff --git a/arch/arm64/kernel/cacheinfo.c b/arch/arm64/kernel/cacheinfo.c
new file mode 100644
index 000000000000..b8629d52fba9
--- /dev/null
+++ b/arch/arm64/kernel/cacheinfo.c
@@ -0,0 +1,128 @@
+/*
+ *  ARM64 cacheinfo support
+ *
+ *  Copyright (C) 2015 ARM Ltd.
+ *  All Rights Reserved
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License version 2 as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ *  kind, whether express or implied; without even the implied warranty
+ *  of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/bitops.h>
+#include <linux/cacheinfo.h>
+#include <linux/cpu.h>
+#include <linux/compiler.h>
+#include <linux/of.h>
+
+#include <asm/cachetype.h>
+#include <asm/processor.h>
+
+#define MAX_CACHE_LEVEL		7	/* Max 7 level supported */
+/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
+#define CLIDR_CTYPE_SHIFT(level)	(3 * (level - 1))
+#define CLIDR_CTYPE_MASK(level)		(7 << CLIDR_CTYPE_SHIFT(level))
+#define CLIDR_CTYPE(clidr, level)	\
+	(((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
+
+static inline enum cache_type get_cache_type(int level)
+{
+	u64 clidr;
+
+	if (level > MAX_CACHE_LEVEL)
+		return CACHE_TYPE_NOCACHE;
+	asm volatile ("mrs	%x0, clidr_el1" : "=r" (clidr));
+	return CLIDR_CTYPE(clidr, level);
+}
+
+/*
+ * Cache Size Selection Register(CSSELR) selects which Cache Size ID
+ * Register(CCSIDR) is accessible by specifying the required cache
+ * level and the cache type. We need to ensure that no one else changes
+ * CSSELR by calling this in non-preemptible context
+ */
+u64 __attribute_const__ cache_get_ccsidr(u64 csselr)
+{
+	u64 ccsidr;
+
+	WARN_ON(preemptible());
+
+	/* Put value into CSSELR */
+	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
+	isb();
+	/* Read result out of CCSIDR */
+	asm volatile("mrs %x0, ccsidr_el1" : "=r" (ccsidr));
+
+	return ccsidr;
+}
+
+static void ci_leaf_init(struct cacheinfo *this_leaf,
+			 enum cache_type type, unsigned int level)
+{
+	bool is_icache = type & CACHE_TYPE_INST;
+	u64 tmp = cache_get_ccsidr((level - 1) << 1 | is_icache);
+
+	this_leaf->level = level;
+	this_leaf->type = type;
+	this_leaf->coherency_line_size = CACHE_LINESIZE(tmp);
+	this_leaf->number_of_sets = CACHE_NUMSETS(tmp);
+	this_leaf->ways_of_associativity = CACHE_ASSOCIATIVITY(tmp);
+	this_leaf->size = this_leaf->number_of_sets *
+	    this_leaf->coherency_line_size * this_leaf->ways_of_associativity;
+	this_leaf->attributes =
+		((tmp & CCSIDR_EL1_WRITE_THROUGH) ? CACHE_WRITE_THROUGH : 0) |
+		((tmp & CCSIDR_EL1_WRITE_BACK) ? CACHE_WRITE_BACK : 0) |
+		((tmp & CCSIDR_EL1_READ_ALLOCATE) ? CACHE_READ_ALLOCATE : 0) |
+		((tmp & CCSIDR_EL1_WRITE_ALLOCATE) ? CACHE_WRITE_ALLOCATE : 0);
+}
+
+static int __init_cache_level(unsigned int cpu)
+{
+	unsigned int ctype, level, leaves;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+	for (level = 1, leaves = 0; level <= MAX_CACHE_LEVEL; level++) {
+		ctype = get_cache_type(level);
+		if (ctype == CACHE_TYPE_NOCACHE) {
+			level--;
+			break;
+		}
+		/* Separate instruction and data caches */
+		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
+	}
+
+	this_cpu_ci->num_levels = level;
+	this_cpu_ci->num_leaves = leaves;
+	return 0;
+}
+
+static int __populate_cache_leaves(unsigned int cpu)
+{
+	unsigned int level, idx;
+	enum cache_type type;
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+
+	for (idx = 0, level = 1; level <= this_cpu_ci->num_levels &&
+	     idx < this_cpu_ci->num_leaves; idx++, level++) {
+		type = get_cache_type(level);
+		if (type == CACHE_TYPE_SEPARATE) {
+			ci_leaf_init(this_leaf++, CACHE_TYPE_DATA, level);
+			ci_leaf_init(this_leaf++, CACHE_TYPE_INST, level);
+		} else {
+			ci_leaf_init(this_leaf++, type, level);
+		}
+	}
+	return 0;
+}
+
+DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
+DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)
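ci_leaf_init() above leaves the geometry decode to the CACHE_LINESIZE/CACHE_NUMSETS/CACHE_ASSOCIATIVITY accessors from <asm/cachetype.h>. For the classic CCSIDR_EL1 layout (LineSize in bits [2:0] as log2(bytes) - 4, Associativity in [12:3] minus one, NumSets in [27:13] minus one) the size computation reduces to the sketch below; the helper names and the sample register value are invented for illustration:

	#include <stdint.h>
	#include <stdio.h>

	/* classic (pre-CCIDX) CCSIDR_EL1 field decode */
	static uint32_t line_size(uint32_t ccsidr) { return 1u << ((ccsidr & 0x7) + 4); }
	static uint32_t ways(uint32_t ccsidr)      { return ((ccsidr >> 3) & 0x3ff) + 1; }
	static uint32_t sets(uint32_t ccsidr)      { return ((ccsidr >> 13) & 0x7fff) + 1; }

	int main(void)
	{
		/* synthetic encoding: 128 sets, 4 ways, 64-byte lines */
		uint32_t ccsidr = (127u << 13) | (3u << 3) | 2u;

		printf("%u sets x %u ways x %u B/line = %u KiB\n",
		       sets(ccsidr), ways(ccsidr), line_size(ccsidr),
		       sets(ccsidr) * ways(ccsidr) * line_size(ccsidr) / 1024);
		return 0;
	}

Run as written this prints "128 sets x 4 ways x 64 B/line = 32 KiB", the same product that ci_leaf_init() stores in this_leaf->size.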
diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c
index 19d17f51db37..5c0896647fd1 100644
--- a/arch/arm64/kernel/cpuidle.c
+++ b/arch/arm64/kernel/cpuidle.c
@@ -29,3 +29,23 @@ int cpu_init_idle(unsigned int cpu)
 	of_node_put(cpu_node);
 	return ret;
 }
+
+/**
+ * cpu_suspend() - function to enter a low-power idle state
+ * @arg: argument to pass to CPU suspend operations
+ *
+ * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
+ * operations back-end error code otherwise.
+ */
+int cpu_suspend(unsigned long arg)
+{
+	int cpu = smp_processor_id();
+
+	/*
+	 * If cpu_ops have not been registered or suspend
+	 * has not been initialized, cpu_suspend call fails early.
+	 */
+	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
+		return -EOPNOTSUPP;
+	return cpu_ops[cpu]->cpu_suspend(arg);
+}
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 57b641747534..929855691dae 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -35,6 +35,7 @@
  */
 DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
 static struct cpuinfo_arm64 boot_cpu_data;
+static bool mixed_endian_el0 = true;
 
 static char *icache_policy_str[] = {
 	[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
@@ -68,6 +69,26 @@ static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
 }
 
+bool cpu_supports_mixed_endian_el0(void)
+{
+	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
+}
+
+bool system_supports_mixed_endian_el0(void)
+{
+	return mixed_endian_el0;
+}
+
+static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
+{
+	mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
+}
+
+static void update_cpu_features(struct cpuinfo_arm64 *info)
+{
+	update_mixed_endian_el0_support(info);
+}
+
 static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
 {
 	if ((boot & mask) == (cur & mask))
@@ -147,6 +168,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	 * If we have AArch32, we care about 32-bit features for compat. These
 	 * registers should be RES0 otherwise.
 	 */
+	diff |= CHECK(id_dfr0, boot, cur, cpu);
 	diff |= CHECK(id_isar0, boot, cur, cpu);
 	diff |= CHECK(id_isar1, boot, cur, cpu);
 	diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +187,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
 	diff |= CHECK(id_pfr0, boot, cur, cpu);
 	diff |= CHECK(id_pfr1, boot, cur, cpu);
 
+	diff |= CHECK(mvfr0, boot, cur, cpu);
+	diff |= CHECK(mvfr1, boot, cur, cpu);
+	diff |= CHECK(mvfr2, boot, cur, cpu);
+
 	/*
 	 * Mismatched CPU features are a recipe for disaster. Don't even
 	 * pretend to support them.
@@ -189,6 +215,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
 	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
 
+	info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
 	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
 	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
 	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,9 +229,14 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
 	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
 	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
 
+	info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+	info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+	info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+
 	cpuinfo_detect_icache_policy(info);
 
 	check_local_cpu_errata();
+	update_cpu_features(info);
 }
 
 void cpuinfo_store_cpu(void)
@@ -221,15 +253,3 @@ void __init cpuinfo_store_boot_cpu(void)
 
 	boot_cpu_data = *info;
 }
-
-u64 __attribute_const__ icache_get_ccsidr(void)
-{
-	u64 ccsidr;
-
-	WARN_ON(preemptible());
-
-	/* Select L1 I-cache and read its size ID register */
-	asm("msr csselr_el1, %1; isb; mrs %0, ccsidr_el1"
-	    : "=r"(ccsidr) : "r"(1L));
-	return ccsidr;
-}
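update_mixed_endian_el0_support() implements a simple invariant: mixed_endian_el0 starts out true and is ANDed with every booting CPU's view of ID_AA64MMFR0_EL1, so the system claims mixed-endian EL0 only if every CPU supports it. The accumulation pattern in isolation (toy per-CPU data, userspace C):

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		/* per-CPU feature bits, e.g. sampled from each CPU's ID register */
		bool cpu_has_feature[] = { true, true, false, true };
		bool system_has_feature = true;	/* optimistic until a CPU disagrees */
		unsigned int i;

		for (i = 0; i < sizeof(cpu_has_feature) / sizeof(cpu_has_feature[0]); i++)
			system_has_feature &= cpu_has_feature[i];

		printf("system supports feature: %s\n",
		       system_has_feature ? "yes" : "no");
		return 0;
	}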
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index d27dd982ff26..f5374065ad53 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -13,13 +13,13 @@
 #include <asm/efi.h>
 #include <asm/sections.h>
 
-efi_status_t handle_kernel_image(efi_system_table_t *sys_table,
-				 unsigned long *image_addr,
-				 unsigned long *image_size,
-				 unsigned long *reserve_addr,
-				 unsigned long *reserve_size,
-				 unsigned long dram_base,
-				 efi_loaded_image_t *image)
+efi_status_t __init handle_kernel_image(efi_system_table_t *sys_table,
+					unsigned long *image_addr,
+					unsigned long *image_size,
+					unsigned long *reserve_addr,
+					unsigned long *reserve_size,
+					unsigned long dram_base,
+					efi_loaded_image_t *image)
 {
 	efi_status_t status;
 	unsigned long kernel_size, kernel_memsize = 0;
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 6fac253bc783..b42c7b480e1e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -11,27 +11,46 @@
  *
  */
 
+#include <linux/atomic.h>
 #include <linux/dmi.h>
 #include <linux/efi.h>
 #include <linux/export.h>
 #include <linux/memblock.h>
+#include <linux/mm_types.h>
 #include <linux/bootmem.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
+#include <linux/preempt.h>
+#include <linux/rbtree.h>
+#include <linux/rwsem.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/efi.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
+#include <asm/mmu.h>
+#include <asm/pgtable.h>
 
 struct efi_memory_map memmap;
 
-static efi_runtime_services_t *runtime;
-
 static u64 efi_system_table;
 
+static pgd_t efi_pgd[PTRS_PER_PGD] __page_aligned_bss;
+
+static struct mm_struct efi_mm = {
+	.mm_rb			= RB_ROOT,
+	.pgd			= efi_pgd,
+	.mm_users		= ATOMIC_INIT(2),
+	.mm_count		= ATOMIC_INIT(1),
+	.mmap_sem		= __RWSEM_INITIALIZER(efi_mm.mmap_sem),
+	.page_table_lock	= __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
+	.mmlist			= LIST_HEAD_INIT(efi_mm.mmlist),
+	INIT_MM_CONTEXT(efi_mm)
+};
+
 static int uefi_debug __initdata;
 static int __init uefi_debug_setup(char *str)
 {
@@ -48,30 +67,33 @@ static int __init is_normal_ram(efi_memory_desc_t *md)
 	return 0;
 }
 
-static void __init efi_setup_idmap(void)
+/*
+ * Translate an EFI virtual address into a physical address: this is necessary,
+ * as some data members of the EFI system table are virtually remapped after
+ * SetVirtualAddressMap() has been called.
+ */
+static phys_addr_t efi_to_phys(unsigned long addr)
 {
-	struct memblock_region *r;
 	efi_memory_desc_t *md;
-	u64 paddr, npages, size;
 
-	for_each_memblock(memory, r)
-		create_id_mapping(r->base, r->size, 0);
-
-	/* map runtime io spaces */
 	for_each_efi_memory_desc(&memmap, md) {
-		if (!(md->attribute & EFI_MEMORY_RUNTIME) || is_normal_ram(md))
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
 			continue;
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-		create_id_mapping(paddr, size, 1);
+		if (md->virt_addr == 0)
+			/* no virtual mapping has been installed by the stub */
+			break;
+		if (md->virt_addr <= addr &&
+		    (addr - md->virt_addr) < (md->num_pages << EFI_PAGE_SHIFT))
+			return md->phys_addr + addr - md->virt_addr;
 	}
+	return addr;
 }
 
 static int __init uefi_init(void)
 {
 	efi_char16_t *c16;
+	void *config_tables;
+	u64 table_size;
 	char vendor[100] = "unknown";
 	int i, retval;
@@ -99,7 +121,7 @@ static int __init uefi_init(void)
 		efi.systab->hdr.revision & 0xffff);
 
 	/* Show what we know for posterity */
-	c16 = early_memremap(efi.systab->fw_vendor,
+	c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
 			     sizeof(vendor));
 	if (c16) {
 		for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
@@ -112,8 +134,14 @@ static int __init uefi_init(void)
 			efi.systab->hdr.revision >> 16,
 			efi.systab->hdr.revision & 0xffff, vendor);
 
-	retval = efi_config_init(NULL);
+	table_size = sizeof(efi_config_table_64_t) * efi.systab->nr_tables;
+	config_tables = early_memremap(efi_to_phys(efi.systab->tables),
+				       table_size);
+	retval = efi_config_parse_tables(config_tables, efi.systab->nr_tables,
+					 sizeof(efi_config_table_64_t), NULL);
+
+	early_memunmap(config_tables, table_size);
 out:
 	early_memunmap(efi.systab,  sizeof(efi_system_table_t));
 	return retval;
@@ -163,9 +191,7 @@ static __init void reserve_regions(void)
 		if (is_normal_ram(md))
 			early_init_dt_add_memory_arch(paddr, size);
 
-		if (is_reserve_region(md) ||
-		    md->type == EFI_BOOT_SERVICES_CODE ||
-		    md->type == EFI_BOOT_SERVICES_DATA) {
+		if (is_reserve_region(md)) {
 			memblock_reserve(paddr, size);
 			if (uefi_debug)
 				pr_cont("*");
@@ -178,123 +204,6 @@ static __init void reserve_regions(void)
 	set_bit(EFI_MEMMAP, &efi.flags);
 }
 
-
-static u64 __init free_one_region(u64 start, u64 end)
-{
-	u64 size = end - start;
-
-	if (uefi_debug)
-		pr_info("  EFI freeing: 0x%012llx-0x%012llx\n", start, end - 1);
-
-	free_bootmem_late(start, size);
-	return size;
-}
-
-static u64 __init free_region(u64 start, u64 end)
-{
-	u64 map_start, map_end, total = 0;
-
-	if (end <= start)
-		return total;
-
-	map_start = (u64)memmap.phys_map;
-	map_end = PAGE_ALIGN(map_start + (memmap.map_end - memmap.map));
-	map_start &= PAGE_MASK;
-
-	if (start < map_end && end > map_start) {
-		/* region overlaps UEFI memmap */
-		if (start < map_start)
-			total += free_one_region(start, map_start);
-
-		if (map_end < end)
-			total += free_one_region(map_end, end);
-	} else
-		total += free_one_region(start, end);
-
-	return total;
-}
-
-static void __init free_boot_services(void)
-{
-	u64 total_freed = 0;
-	u64 keep_end, free_start, free_end;
-	efi_memory_desc_t *md;
-
-	/*
-	 * If kernel uses larger pages than UEFI, we have to be careful
-	 * not to inadvertantly free memory we want to keep if there is
-	 * overlap at the kernel page size alignment. We do not want to
-	 * free is_reserve_region() memory nor the UEFI memmap itself.
-	 *
-	 * The memory map is sorted, so we keep track of the end of
-	 * any previous region we want to keep, remember any region
-	 * we want to free and defer freeing it until we encounter
-	 * the next region we want to keep. This way, before freeing
-	 * it, we can clip it as needed to avoid freeing memory we
-	 * want to keep for UEFI.
-	 */
-
-	keep_end = 0;
-	free_start = 0;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		u64 paddr, npages, size;
-
-		if (is_reserve_region(md)) {
-			/*
-			 * We don't want to free any memory from this region.
-			 */
-			if (free_start) {
-				/* adjust free_end then free region */
-				if (free_end > md->phys_addr)
-					free_end -= PAGE_SIZE;
-				total_freed += free_region(free_start, free_end);
-				free_start = 0;
-			}
-			keep_end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
-			continue;
-		}
-
-		if (md->type != EFI_BOOT_SERVICES_CODE &&
-		    md->type != EFI_BOOT_SERVICES_DATA) {
-			/* no need to free this region */
-			continue;
-		}
-
-		/*
-		 * We want to free memory from this region.
-		 */
-		paddr = md->phys_addr;
-		npages = md->num_pages;
-		memrange_efi_to_native(&paddr, &npages);
-		size = npages << PAGE_SHIFT;
-
-		if (free_start) {
-			if (paddr <= free_end)
-				free_end = paddr + size;
-			else {
-				total_freed += free_region(free_start, free_end);
-				free_start = paddr;
-				free_end = paddr + size;
-			}
-		} else {
-			free_start = paddr;
-			free_end = paddr + size;
-		}
-		if (free_start < keep_end) {
-			free_start += PAGE_SIZE;
-			if (free_start >= free_end)
-				free_start = 0;
-		}
-	}
-	if (free_start)
-		total_freed += free_region(free_start, free_end);
-
-	if (total_freed)
-		pr_info("Freed 0x%llx bytes of EFI boot services memory",
-			total_freed);
-}
-
 void __init efi_init(void)
 {
 	struct efi_fdt_params params;
@@ -317,159 +226,100 @@ void __init efi_init(void)
 		return;
 
 	reserve_regions();
+	early_memunmap(memmap.map, params.mmap_size);
 }
 
-void __init efi_idmap_init(void)
+static bool __init efi_virtmap_init(void)
 {
-	if (!efi_enabled(EFI_BOOT))
-		return;
-
-	/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
-	efi_setup_idmap();
-}
-
-static int __init remap_region(efi_memory_desc_t *md, void **new)
-{
-	u64 paddr, vaddr, npages, size;
+	efi_memory_desc_t *md;
 
-	paddr = md->phys_addr;
-	npages = md->num_pages;
-	memrange_efi_to_native(&paddr, &npages);
-	size = npages << PAGE_SHIFT;
+	for_each_efi_memory_desc(&memmap, md) {
+		u64 paddr, npages, size;
+		pgprot_t prot;
 
-	if (is_normal_ram(md))
-		vaddr = (__force u64)ioremap_cache(paddr, size);
-	else
-		vaddr = (__force u64)ioremap(paddr, size);
+		if (!(md->attribute & EFI_MEMORY_RUNTIME))
+			continue;
+		if (md->virt_addr == 0)
+			return false;
 
-	if (!vaddr) {
-		pr_err("Unable to remap 0x%llx pages @ %p\n",
-		       npages, (void *)paddr);
-		return 0;
-	}
+		paddr = md->phys_addr;
+		npages = md->num_pages;
+		memrange_efi_to_native(&paddr, &npages);
+		size = npages << PAGE_SHIFT;
 
-	/* adjust for any rounding when EFI and system pagesize differs */
-	md->virt_addr = vaddr + (md->phys_addr - paddr);
-
-	if (uefi_debug)
-		pr_info("  EFI remap 0x%012llx => %p\n",
+		pr_info("  EFI remap 0x%016llx => %p\n",
 			md->phys_addr, (void *)md->virt_addr);
 
-	memcpy(*new, md, memmap.desc_size);
-	*new += memmap.desc_size;
-
-	return 1;
+		/*
+		 * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+		 * executable, everything else can be mapped with the XN bits
+		 * set.
+		 */
+		if (!is_normal_ram(md))
+			prot = __pgprot(PROT_DEVICE_nGnRE);
+		else if (md->type == EFI_RUNTIME_SERVICES_CODE)
+			prot = PAGE_KERNEL_EXEC;
+		else
+			prot = PAGE_KERNEL;
+
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+	}
+	return true;
 }
 
 /*
- * Switch UEFI from an identity map to a kernel virtual map
+ * Enable the UEFI Runtime Services if all prerequisites are in place, i.e.,
+ * non-early mapping of the UEFI system table and virtual mappings for all
+ * EFI_MEMORY_RUNTIME regions.
  */
-static int __init arm64_enter_virtual_mode(void)
+static int __init arm64_enable_runtime_services(void)
 {
-	efi_memory_desc_t *md;
-	phys_addr_t virtmap_phys;
-	void *virtmap, *virt_md;
-	efi_status_t status;
 	u64 mapsize;
-	int count = 0;
-	unsigned long flags;
 
 	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
 		return -1;
 	}
 
-	mapsize = memmap.map_end - memmap.map;
-	early_memunmap(memmap.map, mapsize);
-
 	if (efi_runtime_disabled()) {
 		pr_info("EFI runtime services will be disabled.\n");
 		return -1;
 	}
 
 	pr_info("Remapping and enabling EFI services.\n");
-	/* replace early memmap mapping with permanent mapping */
+
+	mapsize = memmap.map_end - memmap.map;
 	memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
 						   mapsize);
-	memmap.map_end = memmap.map + mapsize;
-
-	efi.memmap = &memmap;
-
-	/* Map the runtime regions */
-	virtmap = kmalloc(mapsize, GFP_KERNEL);
-	if (!virtmap) {
-		pr_err("Failed to allocate EFI virtual memmap\n");
+	if (!memmap.map) {
+		pr_err("Failed to remap EFI memory map\n");
 		return -1;
 	}
-	virtmap_phys = virt_to_phys(virtmap);
-	virt_md = virtmap;
-
-	for_each_efi_memory_desc(&memmap, md) {
-		if (!(md->attribute & EFI_MEMORY_RUNTIME))
-			continue;
-		if (!remap_region(md, &virt_md))
-			goto err_unmap;
-		++count;
-	}
+	memmap.map_end = memmap.map + mapsize;
+	efi.memmap = &memmap;
 
-	efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table);
+	efi.systab = (__force void *)ioremap_cache(efi_system_table,
+						   sizeof(efi_system_table_t));
 	if (!efi.systab) {
-		/*
-		 * If we have no virtual mapping for the System Table at this
-		 * point, the memory map doesn't cover the physical offset where
-		 * it resides. This means the System Table will be inaccessible
-		 * to Runtime Services themselves once the virtual mapping is
-		 * installed.
-		 */
-		pr_err("Failed to remap EFI System Table -- buggy firmware?\n");
-		goto err_unmap;
+		pr_err("Failed to remap EFI System Table\n");
+		return -1;
 	}
 	set_bit(EFI_SYSTEM_TABLES, &efi.flags);
 
-	local_irq_save(flags);
-	cpu_switch_mm(idmap_pg_dir, &init_mm);
-
-	/* Call SetVirtualAddressMap with the physical address of the map */
-	runtime = efi.systab->runtime;
-	efi.set_virtual_address_map = runtime->set_virtual_address_map;
-
-	status = efi.set_virtual_address_map(count * memmap.desc_size,
-					     memmap.desc_size,
-					     memmap.desc_version,
-					     (efi_memory_desc_t *)virtmap_phys);
-	cpu_set_reserved_ttbr0();
-	flush_tlb_all();
-	local_irq_restore(flags);
-
-	kfree(virtmap);
-
-	free_boot_services();
-
-	if (status != EFI_SUCCESS) {
-		pr_err("Failed to set EFI virtual address map!  [%lx]\n",
-		       status);
+	if (!efi_virtmap_init()) {
+		pr_err("No UEFI virtual mapping was installed -- runtime services will not be available\n");
 		return -1;
 	}
 
 	/* Set up runtime services function pointers */
-	runtime = efi.systab->runtime;
 	efi_native_runtime_setup();
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
 	efi.runtime_version = efi.systab->hdr.revision;
 
 	return 0;
-
-err_unmap:
-	/* unmap all mappings that succeeded: there are 'count' of those */
-	for (virt_md = virtmap; count--; virt_md += memmap.desc_size) {
-		md = virt_md;
-		iounmap((__force void __iomem *)md->virt_addr);
-	}
-	kfree(virtmap);
-	return -1;
 }
-early_initcall(arm64_enter_virtual_mode);
+early_initcall(arm64_enable_runtime_services);
 
 static int __init arm64_dmi_init(void)
 {
@@ -484,3 +334,23 @@ static int __init arm64_dmi_init(void)
 	return 0;
 }
 core_initcall(arm64_dmi_init);
+
+static void efi_set_pgd(struct mm_struct *mm)
+{
+	cpu_switch_mm(mm->pgd, mm);
+	flush_tlb_all();
+	if (icache_is_aivivt())
+		__flush_icache_all();
+}
+
+void efi_virtmap_load(void)
+{
+	preempt_disable();
+	efi_set_pgd(&efi_mm);
+}
+
+void efi_virtmap_unload(void)
+{
+	efi_set_pgd(current->active_mm);
+	preempt_enable();
+}
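efi_to_phys() above scans the memory map for the EFI_MEMORY_RUNTIME descriptor whose virtual window covers the address and applies that region's phys/virt delta, falling back to an identity translation when no virtual map was installed. The same lookup over a toy descriptor array (struct layout and addresses invented for illustration):

	#include <stdint.h>
	#include <stdio.h>

	struct desc {
		uint64_t phys_addr;
		uint64_t virt_addr;
		uint64_t num_pages;	/* 4 KiB EFI pages */
	};

	static uint64_t to_phys(const struct desc *map, int n, uint64_t addr)
	{
		int i;

		for (i = 0; i < n; i++) {
			const struct desc *md = &map[i];

			if (md->virt_addr <= addr &&
			    addr - md->virt_addr < (md->num_pages << 12))
				return md->phys_addr + (addr - md->virt_addr);
		}
		return addr;	/* no mapping installed: identity, as in the patch */
	}

	int main(void)
	{
		struct desc map[] = {
			{ 0x80000000, 0xffff000000000000, 16 },
			{ 0x90000000, 0xffff000000100000, 4 },
		};

		/* falls inside the second region: prints 0x90000010 */
		printf("0x%llx\n", (unsigned long long)
		       to_phys(map, 2, 0xffff000000100010));
		return 0;
	}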
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index fd4fa374e5d2..cf21bb3bf752 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -269,18 +269,18 @@ ENDPROC(el1_error_invalid)
 el1_sync:
 	kernel_entry 1
 	mrs	x1, esr_el1			// read the syndrome register
-	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
-	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
+	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
+	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 	b.eq	el1_da
-	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
+	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
-	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
+	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
 	b.eq	el1_sp_pc
-	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
+	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
 	b.eq	el1_sp_pc
-	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
+	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
 	b.eq	el1_undef
-	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
+	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 	b.ge	el1_dbg
 	b	el1_inv
 el1_da:
@@ -318,7 +318,7 @@ el1_dbg:
 	/*
 	 * Debug exception handling
 	 */
-	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
+	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
 	cinc	x24, x24, eq			// set bit '0'
 	tbz	x24, #0, el1_inv		// EL1 only
 	mrs	x0, far_el1
@@ -375,26 +375,26 @@ el1_preempt:
 el0_sync:
 	kernel_entry 0
 	mrs	x25, esr_el1			// read the syndrome register
-	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
-	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
+	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
+	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
 	b.eq	el0_svc
-	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
+	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
 	b.eq	el0_da
-	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
+	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
 	b.eq	el0_ia
-	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
+	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
 	b.eq	el0_fpsimd_acc
-	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
+	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
-	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
+	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
+	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
 	b.eq	el0_sp_pc
-	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
+	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
 	b.eq	el0_sp_pc
-	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
+	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
+	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
 	b.ge	el0_dbg
 	b	el0_inv
@@ -403,37 +403,37 @@ el0_sync:
 el0_sync_compat:
 	kernel_entry 0, 32
 	mrs	x25, esr_el1			// read the syndrome register
-	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
-	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
+	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
+	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
 	b.eq	el0_svc_compat
-	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
+	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
 	b.eq	el0_da
-	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
+	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
 	b.eq	el0_ia
-	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
+	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
 	b.eq	el0_fpsimd_acc
-	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
+	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
-	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
+	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
+	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
+	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
+	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
+	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
+	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
+	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
 	b.ge	el0_dbg
 	b	el0_inv
 el0_svc_compat:
 	/*
 	 * AArch32 syscall handling
 	 */
-	adr	stbl, compat_sys_call_table	// load compat syscall table pointer
+	adrp	stbl, compat_sys_call_table	// load compat syscall table pointer
 	uxtw	scno, w7			// syscall number in w7 (r7)
 	mov	sc_nr, #__NR_compat_syscalls
 	b	el0_svc_naked
diff --git a/arch/arm64/kernel/sys32.S b/arch/arm64/kernel/entry32.S
index 423a5b3fc2be..9a8f6ae2530e 100644
--- a/arch/arm64/kernel/sys32.S
+++ b/arch/arm64/kernel/entry32.S
@@ -27,26 +27,26 @@
  * System call wrappers for the AArch32 compatibility layer.
  */
 
-compat_sys_sigreturn_wrapper:
+ENTRY(compat_sys_sigreturn_wrapper)
 	mov	x0, sp
 	mov	x27, #0		// prevent syscall restart handling (why)
 	b	compat_sys_sigreturn
 ENDPROC(compat_sys_sigreturn_wrapper)
 
-compat_sys_rt_sigreturn_wrapper:
+ENTRY(compat_sys_rt_sigreturn_wrapper)
 	mov	x0, sp
 	mov	x27, #0		// prevent syscall restart handling (why)
 	b	compat_sys_rt_sigreturn
 ENDPROC(compat_sys_rt_sigreturn_wrapper)
 
-compat_sys_statfs64_wrapper:
+ENTRY(compat_sys_statfs64_wrapper)
 	mov	w3, #84
 	cmp	w1, #88
 	csel	w1, w3, w1, eq
 	b	compat_sys_statfs64
 ENDPROC(compat_sys_statfs64_wrapper)
 
-compat_sys_fstatfs64_wrapper:
+ENTRY(compat_sys_fstatfs64_wrapper)
 	mov	w3, #84
 	cmp	w1, #88
 	csel	w1, w3, w1, eq
@@ -58,33 +58,33 @@ ENDPROC(compat_sys_fstatfs64_wrapper)
  * in registers or that take 32-bit parameters which require sign
  * extension.
  */
-compat_sys_pread64_wrapper:
+ENTRY(compat_sys_pread64_wrapper)
 	regs_to_64	x3, x4, x5
 	b	sys_pread64
 ENDPROC(compat_sys_pread64_wrapper)
 
-compat_sys_pwrite64_wrapper:
+ENTRY(compat_sys_pwrite64_wrapper)
 	regs_to_64	x3, x4, x5
 	b	sys_pwrite64
 ENDPROC(compat_sys_pwrite64_wrapper)
 
-compat_sys_truncate64_wrapper:
+ENTRY(compat_sys_truncate64_wrapper)
 	regs_to_64	x1, x2, x3
 	b	sys_truncate
 ENDPROC(compat_sys_truncate64_wrapper)
 
-compat_sys_ftruncate64_wrapper:
+ENTRY(compat_sys_ftruncate64_wrapper)
 	regs_to_64	x1, x2, x3
 	b	sys_ftruncate
 ENDPROC(compat_sys_ftruncate64_wrapper)
 
-compat_sys_readahead_wrapper:
+ENTRY(compat_sys_readahead_wrapper)
 	regs_to_64	x1, x2, x3
 	mov	w2, w4
 	b	sys_readahead
 ENDPROC(compat_sys_readahead_wrapper)
 
-compat_sys_fadvise64_64_wrapper:
+ENTRY(compat_sys_fadvise64_64_wrapper)
 	mov	w6, w1
 	regs_to_64	x1, x2, x3
 	regs_to_64	x2, x4, x5
@@ -92,24 +92,14 @@ compat_sys_fadvise64_64_wrapper:
 	b	sys_fadvise64_64
 ENDPROC(compat_sys_fadvise64_64_wrapper)
 
-compat_sys_sync_file_range2_wrapper:
+ENTRY(compat_sys_sync_file_range2_wrapper)
 	regs_to_64	x2, x2, x3
 	regs_to_64	x3, x4, x5
 	b	sys_sync_file_range2
 ENDPROC(compat_sys_sync_file_range2_wrapper)
 
-compat_sys_fallocate_wrapper:
+ENTRY(compat_sys_fallocate_wrapper)
 	regs_to_64	x2, x2, x3
 	regs_to_64	x3, x4, x5
 	b	sys_fallocate
 ENDPROC(compat_sys_fallocate_wrapper)
-
-#undef __SYSCALL
-#define __SYSCALL(x, y)		.quad	y	// x
-
-/*
- * The system calls table must be 4KB aligned.
- */
-	.align	12
-ENTRY(compat_sys_call_table)
-#include <asm/unistd32.h>
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index df1cf15377b4..98bbe06e469c 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -894,7 +894,7 @@ static struct notifier_block hw_breakpoint_reset_nb = {
 	.notifier_call = hw_breakpoint_reset_notify,
 };
 
-#ifdef CONFIG_ARM64_CPU_SUSPEND
+#ifdef CONFIG_CPU_PM
 extern void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *));
 #else
 static inline void cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index 7e9327a0986d..27d4864577e5 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -17,14 +17,19 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 #include <linux/bitops.h>
+#include <linux/bug.h>
 #include <linux/compiler.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/smp.h>
+#include <linux/spinlock.h>
 #include <linux/stop_machine.h>
+#include <linux/types.h>
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
+#include <asm/fixmap.h>
 #include <asm/insn.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
@@ -72,6 +77,29 @@ bool __kprobes aarch64_insn_is_nop(u32 insn)
 	}
 }
 
+static DEFINE_SPINLOCK(patch_lock);
+
+static void __kprobes *patch_map(void *addr, int fixmap)
+{
+	unsigned long uintaddr = (uintptr_t) addr;
+	bool module = !core_kernel_text(uintaddr);
+	struct page *page;
+
+	if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
+		page = vmalloc_to_page(addr);
+	else
+		page = virt_to_page(addr);
+
+	BUG_ON(!page);
+	set_fixmap(fixmap, page_to_phys(page));
+
+	return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
+}
+
+static void __kprobes patch_unmap(int fixmap)
+{
+	clear_fixmap(fixmap);
+}
+
 /*
  * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
  * little-endian.
@@ -88,10 +116,27 @@ int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
 	return ret;
 }
 
+static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
+{
+	void *waddr = addr;
+	unsigned long flags = 0;
+	int ret;
+
+	spin_lock_irqsave(&patch_lock, flags);
+	waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
+
+	patch_unmap(FIX_TEXT_POKE0);
+	spin_unlock_irqrestore(&patch_lock, flags);
+
+	return ret;
+}
+
 int __kprobes aarch64_insn_write(void *addr, u32 insn)
 {
 	insn = cpu_to_le32(insn);
-	return probe_kernel_write(addr, &insn, AARCH64_INSN_SIZE);
+	return __aarch64_insn_write(addr, insn);
 }
 
 static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
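patch_map() above never writes kernel text through its normal, potentially read-only mapping; it installs a temporary writable alias for the target page in the FIX_TEXT_POKE0 fixmap slot and preserves the offset within the page, all under patch_lock. The address arithmetic, reduced to a sketch (the window address and helper are made up; only the offset-preserving math mirrors the patch):

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096ull
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	/* stand-in for __fix_to_virt(FIX_TEXT_POKE0): a fixed alias window */
	#define POKE_WINDOW	0xffff7dfffe000000ull

	/* keep the page offset, redirect the page itself into the window */
	static uint64_t patch_map(uint64_t addr)
	{
		return POKE_WINDOW + (addr & ~PAGE_MASK);
	}

	int main(void)
	{
		uint64_t insn_addr = 0xffff000008123454ull;

		printf("write to 0x%llx via alias 0x%llx\n",
		       (unsigned long long)insn_addr,
		       (unsigned long long)patch_map(insn_addr));
		return 0;
	}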
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index fd027b101de5..67bf4107f6ef 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,7 @@
 #include <linux/mm.h>
 #include <linux/moduleloader.h>
 #include <linux/vmalloc.h>
+#include <asm/alternative.h>
 #include <asm/insn.h>
 #include <asm/sections.h>
 
@@ -34,8 +35,8 @@
 void *module_alloc(unsigned long size)
 {
 	return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
-				    GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
-				    __builtin_return_address(0));
+				    GFP_KERNEL, PAGE_KERNEL_EXEC, 0,
+				    NUMA_NO_NODE, __builtin_return_address(0));
 }
 
 enum aarch64_reloc_op {
diff --git a/arch/arm64/kernel/pci.c b/arch/arm64/kernel/pci.c
index ce5836c14ec1..6f93c24ca801 100644
--- a/arch/arm64/kernel/pci.c
+++ b/arch/arm64/kernel/pci.c
@@ -46,25 +46,3 @@ int pcibios_add_device(struct pci_dev *dev)
 
 	return 0;
 }
-
-
-#ifdef CONFIG_PCI_DOMAINS_GENERIC
-static bool dt_domain_found = false;
-
-void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
-{
-	int domain = of_get_pci_domain_nr(parent->of_node);
-
-	if (domain >= 0) {
-		dt_domain_found = true;
-	} else if (dt_domain_found == true) {
-		dev_err(parent, "Node %s is missing \"linux,pci-domain\" property in DT\n",
-			parent->of_node->full_name);
-		return;
-	} else {
-		domain = pci_get_new_domain_nr();
-	}
-
-	bus->domain_nr = domain;
-}
-#endif
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 6762ad705587..3f62b35fb6f1 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
 	else
 		return PERF_SAMPLE_REGS_ABI_64;
 }
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+			struct pt_regs *regs,
+			struct pt_regs *regs_user_copy)
+{
+	regs_user->regs = task_pt_regs(current);
+	regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index f1dbca7d5c96..3425f311c49e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -540,8 +540,6 @@ const struct cpu_operations cpu_psci_ops = {
 	.name		= "psci",
 #ifdef CONFIG_CPU_IDLE
 	.cpu_init_idle	= cpu_psci_cpu_init_idle,
-#endif
-#ifdef CONFIG_ARM64_CPU_SUSPEND
 	.cpu_suspend	= cpu_psci_cpu_suspend,
 #endif
 #ifdef CONFIG_SMP
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b80991166754..e8420f635bd4 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -40,6 +40,7 @@
 #include <linux/fs.h>
 #include <linux/proc_fs.h>
 #include <linux/memblock.h>
+#include <linux/of_iommu.h>
 #include <linux/of_fdt.h>
 #include <linux/of_platform.h>
 #include <linux/efi.h>
@@ -322,25 +323,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys)
 	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
 }
 
-/*
- * Limit the memory size that was specified via FDT.
- */
-static int __init early_mem(char *p)
-{
-	phys_addr_t limit;
-
-	if (!p)
-		return 1;
-
-	limit = memparse(p, &p) & PAGE_MASK;
-	pr_notice("Memory limited to %lldMB\n", limit >> 20);
-
-	memblock_enforce_memory_limit(limit);
-
-	return 0;
-}
-early_param("mem", early_mem);
-
 static void __init request_standard_resources(void)
 {
 	struct memblock_region *region;
@@ -401,7 +383,7 @@ void __init setup_arch(char **cmdline_p)
 	paging_init();
 	request_standard_resources();
 
-	efi_idmap_init();
+	early_ioremap_reset();
 
 	unflatten_device_tree();
 
@@ -424,6 +406,7 @@ void __init setup_arch(char **cmdline_p)
 
 static int __init arm64_device_init(void)
 {
+	of_iommu_init();
 	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 	return 0;
 }
diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c
index 6fa792137eda..660ccf9f7524 100644
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
@@ -131,7 +131,7 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
 	struct rt_sigframe __user *frame;
 
 	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+	current->restart_block.fn = do_no_restart_syscall;
 
 	/*
 	 * Since we stacked the signal on a 128-bit boundary, then 'sp' should
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 5a1ba6e80d4e..c20a300e2213 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -347,7 +347,7 @@ asmlinkage int compat_sys_sigreturn(struct pt_regs *regs)
 	struct compat_sigframe __user *frame;
 
 	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+	current->restart_block.fn = do_no_restart_syscall;
 
 	/*
 	 * Since we stacked the signal on a 64-bit boundary,
@@ -381,7 +381,7 @@ asmlinkage int compat_sys_rt_sigreturn(struct pt_regs *regs)
 	struct compat_rt_sigframe __user *frame;
 
 	/* Always make any pending restarted system calls return -EINTR */
-	current_thread_info()->restart_block.fn = do_no_restart_syscall;
+	current->restart_block.fn = do_no_restart_syscall;
 
 	/*
 	 * Since we stacked the signal on a 64-bit boundary,
@@ -440,7 +440,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 {
 	compat_ulong_t handler = ptr_to_compat(ka->sa.sa_handler);
 	compat_ulong_t retcode;
-	compat_ulong_t spsr = regs->pstate & ~PSR_f;
+	compat_ulong_t spsr = regs->pstate & ~(PSR_f | COMPAT_PSR_E_BIT);
 	int thumb;
 
 	/* Check if the handler is written for ARM or Thumb */
@@ -454,6 +454,9 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	/* The IT state must be cleared for both ARM and Thumb-2 */
 	spsr &= ~COMPAT_PSR_IT_MASK;
 
+	/* Restore the original endianness */
+	spsr |= COMPAT_PSR_ENDSTATE;
+
 	if (ka->sa.sa_flags & SA_RESTORER) {
 		retcode = ptr_to_compat(ka->sa.sa_restorer);
 	} else {
@@ -501,7 +504,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
 	__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
 	/* set the compat FSR WnR */
-	__put_user_error(!!(current->thread.fault_code & ESR_EL1_WRITE) <<
+	__put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
 			 FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
 	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
 	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 7ae6ee085261..328b8ce4b007 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -65,7 +65,6 @@ struct secondary_data secondary_data;
 enum ipi_msg_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
-	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 	IPI_TIMER,
 	IPI_IRQ_WORK,
@@ -483,7 +482,6 @@ static const char *ipi_types[NR_IPI] __tracepoint_string = {
 #define S(x,s)	[x] = s
 	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
 	S(IPI_CALL_FUNC, "Function call interrupts"),
-	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
 	S(IPI_TIMER, "Timer broadcast interrupts"),
 	S(IPI_IRQ_WORK, "IRQ work interrupts"),
@@ -527,7 +525,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 #ifdef CONFIG_IRQ_WORK
@@ -585,12 +583,6 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		irq_exit();
 		break;
 
-	case IPI_CALL_FUNC_SINGLE:
-		irq_enter();
-		generic_smp_call_function_single_interrupt();
-		irq_exit();
-		break;
-
 	case IPI_CPU_STOP:
 		irq_enter();
 		ipi_cpu_stop(cpu);
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 4f93c67e63de..14944e5b28da 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -25,6 +25,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpu_ops.h>
 #include <asm/cputype.h>
+#include <asm/io.h>
 #include <asm/smp_plat.h>
 
 extern void secondary_holding_pen(void);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 3771b72b6569..d7daf45ae7a2 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,10 +1,10 @@
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
-#include <asm/cpu_ops.h>
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
+#include <asm/mmu_context.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
@@ -50,26 +50,6 @@ void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *))
 	hw_breakpoint_restore = hw_bp_restore;
 }
 
-/**
- * cpu_suspend() - function to enter a low-power state
- * @arg: argument to pass to CPU suspend operations
- *
- * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
- * operations back-end error code otherwise.
- */
-int cpu_suspend(unsigned long arg)
-{
-	int cpu = smp_processor_id();
-
-	/*
-	 * If cpu_ops have not been registered or suspend
-	 * has not been initialized, cpu_suspend call fails early.
-	 */
-	if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend)
-		return -EOPNOTSUPP;
-	return cpu_ops[cpu]->cpu_suspend(arg);
-}
-
 /*
  * __cpu_suspend
  *
@@ -98,7 +78,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
 	 */
 	ret = __cpu_suspend_enter(arg, fn);
 	if (ret == 0) {
-		cpu_switch_mm(mm->pgd, mm);
+		/*
+		 * We are resuming from reset with TTBR0_EL1 set to the
+		 * idmap to enable the MMU; restore the active_mm mappings in
+		 * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+		 * the thread entered __cpu_suspend with TTBR0_EL1 set to
+		 * reserved TTBR0 page tables and should be restored as such.
+		 */
+		if (mm == &init_mm)
+			cpu_set_reserved_ttbr0();
+		else
+			cpu_switch_mm(mm->pgd, mm);
+
 		flush_tlb_all();
 
 		/*
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 3fa98ff14f0e..75151aaf1a52 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -39,10 +39,9 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
 /*
  * Wrappers to pass the pt_regs argument.
  */
+asmlinkage long sys_rt_sigreturn_wrapper(void);
 #define sys_rt_sigreturn	sys_rt_sigreturn_wrapper
 
-#include <asm/syscalls.h>
-
 #undef __SYSCALL
 #define __SYSCALL(nr, sym)	[nr] = sym,
 
@@ -50,7 +49,7 @@ asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
  * The sys_call_table array must be 4K aligned to be accessible from
  * kernel/entry.S.
  */
-void *sys_call_table[__NR_syscalls] __aligned(4096) = {
+void * const sys_call_table[__NR_syscalls] __aligned(4096) = {
 	[0 ... __NR_syscalls - 1] = sys_ni_syscall,
 #include <asm/unistd.h>
 };
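Both syscall tables rely on GCC's range-designator extension: "[0 ... N - 1] = sys_ni_syscall" first fills every slot, then the generated __SYSCALL() entries from the unistd headers override the implemented numbers, so any hole in the numbering falls through to -ENOSYS automatically. The idiom on its own (toy table, hypothetical names):

	#include <stdio.h>

	#define NR_CALLS 8

	static long ni_call(void)  { return -38; }	/* -ENOSYS */
	static long do_read(void)  { return 0; }
	static long do_write(void) { return 1; }

	/* the range designator fills everything; later entries override */
	static long (* const call_table[NR_CALLS])(void) = {
		[0 ... NR_CALLS - 1] = ni_call,
		[3] = do_read,
		[4] = do_write,
	};

	int main(void)
	{
		printf("nr 3 -> %ld, nr 5 -> %ld\n",
		       call_table[3](), call_table[5]());
		return 0;
	}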
+ */ +#define __COMPAT_SYSCALL_NR + +#include <linux/compiler.h> +#include <linux/syscalls.h> + +asmlinkage long compat_sys_sigreturn_wrapper(void); +asmlinkage long compat_sys_rt_sigreturn_wrapper(void); +asmlinkage long compat_sys_statfs64_wrapper(void); +asmlinkage long compat_sys_fstatfs64_wrapper(void); +asmlinkage long compat_sys_pread64_wrapper(void); +asmlinkage long compat_sys_pwrite64_wrapper(void); +asmlinkage long compat_sys_truncate64_wrapper(void); +asmlinkage long compat_sys_ftruncate64_wrapper(void); +asmlinkage long compat_sys_readahead_wrapper(void); +asmlinkage long compat_sys_fadvise64_64_wrapper(void); +asmlinkage long compat_sys_sync_file_range2_wrapper(void); +asmlinkage long compat_sys_fallocate_wrapper(void); + +#undef __SYSCALL +#define __SYSCALL(nr, sym) [nr] = sym, + +/* + * The sys_call_table array must be 4K aligned to be accessible from + * kernel/entry.S. + */ +void * const compat_sys_call_table[__NR_compat_syscalls] __aligned(4096) = { + [0 ... __NR_compat_syscalls - 1] = sys_ni_syscall, +#include <asm/unistd32.h> +}; diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 0a801e3743d5..1ef2940df13c 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -33,6 +33,7 @@ #include <asm/atomic.h> #include <asm/debug-monitors.h> +#include <asm/esr.h> #include <asm/traps.h> #include <asm/stacktrace.h> #include <asm/exception.h> @@ -373,6 +374,51 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs) return sys_ni_syscall(); } +static const char *esr_class_str[] = { + [0 ... ESR_ELx_EC_MAX] = "UNRECOGNIZED EC", + [ESR_ELx_EC_UNKNOWN] = "Unknown/Uncategorized", + [ESR_ELx_EC_WFx] = "WFI/WFE", + [ESR_ELx_EC_CP15_32] = "CP15 MCR/MRC", + [ESR_ELx_EC_CP15_64] = "CP15 MCRR/MRRC", + [ESR_ELx_EC_CP14_MR] = "CP14 MCR/MRC", + [ESR_ELx_EC_CP14_LS] = "CP14 LDC/STC", + [ESR_ELx_EC_FP_ASIMD] = "ASIMD", + [ESR_ELx_EC_CP10_ID] = "CP10 MRC/VMRS", + [ESR_ELx_EC_CP14_64] = "CP14 MCRR/MRRC", + [ESR_ELx_EC_ILL] = "PSTATE.IL", + [ESR_ELx_EC_SVC32] = "SVC (AArch32)", + [ESR_ELx_EC_HVC32] = "HVC (AArch32)", + [ESR_ELx_EC_SMC32] = "SMC (AArch32)", + [ESR_ELx_EC_SVC64] = "SVC (AArch64)", + [ESR_ELx_EC_HVC64] = "HVC (AArch64)", + [ESR_ELx_EC_SMC64] = "SMC (AArch64)", + [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", + [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", + [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", + [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", + [ESR_ELx_EC_PC_ALIGN] = "PC Alignment", + [ESR_ELx_EC_DABT_LOW] = "DABT (lower EL)", + [ESR_ELx_EC_DABT_CUR] = "DABT (current EL)", + [ESR_ELx_EC_SP_ALIGN] = "SP Alignment", + [ESR_ELx_EC_FP_EXC32] = "FP (AArch32)", + [ESR_ELx_EC_FP_EXC64] = "FP (AArch64)", + [ESR_ELx_EC_SERROR] = "SError", + [ESR_ELx_EC_BREAKPT_LOW] = "Breakpoint (lower EL)", + [ESR_ELx_EC_BREAKPT_CUR] = "Breakpoint (current EL)", + [ESR_ELx_EC_SOFTSTP_LOW] = "Software Step (lower EL)", + [ESR_ELx_EC_SOFTSTP_CUR] = "Software Step (current EL)", + [ESR_ELx_EC_WATCHPT_LOW] = "Watchpoint (lower EL)", + [ESR_ELx_EC_WATCHPT_CUR] = "Watchpoint (current EL)", + [ESR_ELx_EC_BKPT32] = "BKPT (AArch32)", + [ESR_ELx_EC_VECTOR32] = "Vector catch (AArch32)", + [ESR_ELx_EC_BRK64] = "BRK (AArch64)", +}; + +const char *esr_get_class_string(u32 esr) +{ + return esr_class_str[esr >> ESR_ELx_EC_SHIFT]; +} + /* * bad_mode handles the impossible case in the exception vector. 
  */
@@ -382,8 +428,8 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
-	pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
-		handler[reason], esr);
+	pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
+		handler[reason], esr, esr_get_class_string(esr));
 	__show_regs(regs);
 
 	info.si_signo = SIGILL;
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 9965ec87cbec..5d9d2dca530d 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -8,6 +8,7 @@
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 
 #include "image.h"
 
@@ -49,6 +50,14 @@ PECOFF_FILE_ALIGNMENT = 0x200;
 #define PECOFF_EDATA_PADDING
 #endif
 
+#ifdef CONFIG_DEBUG_ALIGN_RODATA
+#define ALIGN_DEBUG_RO			. = ALIGN(1<<SECTION_SHIFT);
+#define ALIGN_DEBUG_RO_MIN(min)		ALIGN_DEBUG_RO
+#else
+#define ALIGN_DEBUG_RO
+#define ALIGN_DEBUG_RO_MIN(min)		. = ALIGN(min);
+#endif
+
 SECTIONS
 {
 	/*
@@ -71,6 +80,7 @@ SECTIONS
 		_text = .;
 		HEAD_TEXT
 	}
+	ALIGN_DEBUG_RO
 	.text : {			/* Real text segment		*/
 		_stext = .;		/* Text and read-only data	*/
 			__exception_text_start = .;
@@ -87,19 +97,22 @@ SECTIONS
 		*(.got)			/* Global offset table		*/
 	}
 
+	ALIGN_DEBUG_RO
 	RO_DATA(PAGE_SIZE)
 	EXCEPTION_TABLE(8)
 	NOTES
+	ALIGN_DEBUG_RO
 	_etext = .;			/* End of text and rodata section */
 
-	. = ALIGN(PAGE_SIZE);
+	ALIGN_DEBUG_RO_MIN(PAGE_SIZE)
 	__init_begin = .;
 
 	INIT_TEXT_SECTION(8)
 	.exit.text : {
 		ARM_EXIT_KEEP(EXIT_TEXT)
 	}
-	. = ALIGN(16);
+
+	ALIGN_DEBUG_RO_MIN(16)
 	.init.data : {
 		INIT_DATA
 		INIT_SETUP(16)