| author | Geoff Levand <geoff@infradead.org> | 2016-06-23 19:54:48 +0200 |
|---|---|---|
| committer | Catalin Marinas <catalin.marinas@arm.com> | 2016-06-27 17:31:25 +0200 |
| commit | f9076ecfb1216a478312b1c078d04792df6d4477 (patch) | |
| tree | 824d82ef044ea476fe23416b2d17e6481d6d060c | |
| parent | arm64: smp: Add function to determine if cpus are stuck in the kernel (diff) | |
arm64: Add back cpu reset routines
Commit 68234df4ea79 ("arm64: kill flush_cache_all()") removed the global
arm64 routines cpu_reset() and cpu_soft_restart() needed by the arm64
kexec and kdump support. Add back a simplified version of
cpu_soft_restart() with some changes needed for kexec in the new files
cpu-reset.S and cpu-reset.h.
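
For orientation, the sketch below shows how a kexec-style caller might invoke the restored helper; it is a hedged illustration only. The machine_kexec-style function, the kimage fields, and reboot_code_buffer_phys are assumptions borrowed from the wider kexec work, not part of this patch, which only adds cpu_soft_restart() itself (see cpu-reset.h in the diff below).

```c
#include <linux/kexec.h>
#include <asm/page.h>
#include <asm/virt.h>

#include "cpu-reset.h"

/*
 * Hypothetical kexec-style caller; only cpu_soft_restart()'s signature
 * comes from this patch, everything else here is illustrative.
 */
static void example_machine_kexec(struct kimage *kimage)
{
	/* Physical address of the relocation code the CPU should jump to. */
	unsigned long reboot_code_buffer_phys =
		page_to_phys(kimage->control_code_page);

	/*
	 * Request an EL2 entry when the CPU booted at EL2; cpu_soft_restart()
	 * re-validates this against the boot mode before switching.
	 */
	cpu_soft_restart(is_hyp_mode_available(), reboot_code_buffer_phys,
			 kimage->head, kimage->start, 0);
	/* not reached: the CPU has branched to the reset entry point */
}
```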
When a CPU is reset, it needs to be put back into the exception level it had
when it entered the kernel. Update cpu_soft_restart() to accept an argument
that signals whether the reset address should be entered at EL1 or EL2, and
add a new hypercall, HVC_SOFT_RESTART, which is used for the EL2 switch.
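
As a reading aid, here is a rough C rendering of the register shuffle the hyp-stub performs for the new hypercall: x0 carries HVC_SOFT_RESTART, x1 the address to enter at EL2, and x2-x4 the three arguments that address expects. The function and type names below are invented for illustration; the authoritative version is the el1_sync assembly in the hyp-stub.S hunk of the diff.

```c
#include <asm/virt.h>

typedef void (*soft_restart_fn)(unsigned long, unsigned long, unsigned long);

/* Illustrative C equivalent of the el1_sync branch added by this patch. */
static void el2_stub_handle_hvc(unsigned long cmd, unsigned long entry,
				unsigned long arg0, unsigned long arg1,
				unsigned long arg2)
{
	if (cmd == HVC_SOFT_RESTART)
		/* Enter the reset address at EL2 with its three arguments. */
		((soft_restart_fn)entry)(arg0, arg1, arg2);	/* no return */

	/*
	 * Other commands (HVC_SET_VECTORS, a stale kvm_call_hyp) keep the
	 * existing handling shown in the diff.
	 */
}
```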
Signed-off-by: Geoff Levand <geoff@infradead.org>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
| -rw-r--r-- | arch/arm64/include/asm/virt.h | 5 |
|---|---|---|
| -rw-r--r-- | arch/arm64/kernel/cpu-reset.S | 54 |
| -rw-r--r-- | arch/arm64/kernel/cpu-reset.h | 34 |
| -rw-r--r-- | arch/arm64/kernel/hyp-stub.S | 10 |

4 files changed, 102 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index dcbcf8dcbefb..bbc6a8cf83f1 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -34,6 +34,11 @@
  */
 #define HVC_SET_VECTORS 1
 
+/*
+ * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
+ */
+#define HVC_SOFT_RESTART 2
+
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
diff --git a/arch/arm64/kernel/cpu-reset.S b/arch/arm64/kernel/cpu-reset.S
new file mode 100644
index 000000000000..65f42d257414
--- /dev/null
+++ b/arch/arm64/kernel/cpu-reset.S
@@ -0,0 +1,54 @@
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/sysreg.h>
+#include <asm/virt.h>
+
+.text
+.pushsection    .idmap.text, "ax"
+
+/*
+ * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
+ * cpu_soft_restart.
+ *
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
+ * @entry: Location to jump to for soft reset.
+ * arg0: First argument passed to @entry.
+ * arg1: Second argument passed to @entry.
+ * arg2: Third argument passed to @entry.
+ *
+ * Put the CPU into the same state as it would be if it had been reset, and
+ * branch to what would be the reset vector. It must be executed with the
+ * flat identity mapping.
+ */
+ENTRY(__cpu_soft_restart)
+	/* Clear sctlr_el1 flags. */
+	mrs	x12, sctlr_el1
+	ldr	x13, =SCTLR_ELx_FLAGS
+	bic	x12, x12, x13
+	msr	sctlr_el1, x12
+	isb
+
+	cbz	x0, 1f				// el2_switch?
+	mov	x0, #HVC_SOFT_RESTART
+	hvc	#0				// no return
+
+1:	mov	x18, x1				// entry
+	mov	x0, x2				// arg0
+	mov	x1, x3				// arg1
+	mov	x2, x4				// arg2
+	br	x18
+ENDPROC(__cpu_soft_restart)
+
+.popsection
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
new file mode 100644
index 000000000000..d4e9ecb264f0
--- /dev/null
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -0,0 +1,34 @@
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2015 Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARM64_CPU_RESET_H
+#define _ARM64_CPU_RESET_H
+
+#include <asm/virt.h>
+
+void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
+	unsigned long arg0, unsigned long arg1, unsigned long arg2);
+
+static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
+	unsigned long entry, unsigned long arg0, unsigned long arg1,
+	unsigned long arg2)
+{
+	typeof(__cpu_soft_restart) *restart;
+
+	el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
+		is_hyp_mode_available();
+	restart = (void *)virt_to_phys(__cpu_soft_restart);
+
+	cpu_install_idmap();
+	restart(el2_switch, entry, arg0, arg1, arg2);
+	unreachable();
+}
+
+#endif
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S
index 8727f4490772..d3b5f75e652e 100644
--- a/arch/arm64/kernel/hyp-stub.S
+++ b/arch/arm64/kernel/hyp-stub.S
@@ -71,8 +71,16 @@ el1_sync:
 	msr	vbar_el2, x1
 	b	9f
 
+2:	cmp	x0, #HVC_SOFT_RESTART
+	b.ne	3f
+	mov	x0, x2
+	mov	x2, x4
+	mov	x4, x1
+	mov	x1, x3
+	br	x4				// no return
+
 	/* Someone called kvm_call_hyp() against the hyp-stub... */
-2:	mov	x0, #ARM_EXCEPTION_HYP_GONE
+3:	mov	x0, #ARM_EXCEPTION_HYP_GONE
 
 9:	eret
 ENDPROC(el1_sync)