Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                       | 26
-rw-r--r--  arch/arm64/include/asm/cpucaps.h         |  5
-rw-r--r--  arch/arm64/include/asm/insn.h            | 18
-rw-r--r--  arch/arm64/include/asm/kvm_host.h        |  3
-rw-r--r--  arch/arm64/include/asm/syscall.h         |  2
-rw-r--r--  arch/arm64/include/asm/syscall_wrapper.h | 18
-rw-r--r--  arch/arm64/kernel/cpu_errata.c           | 48
-rw-r--r--  arch/arm64/kernel/entry.S                |  4
-rw-r--r--  arch/arm64/kernel/kaslr.c                |  6
-rw-r--r--  arch/arm64/kernel/module.c               | 44
-rw-r--r--  arch/arm64/kernel/sys.c                  | 16
-rw-r--r--  arch/arm64/kernel/sys32.c                |  7
-rw-r--r--  arch/arm64/kernel/syscall.c              | 31
-rw-r--r--  arch/arm64/kernel/traps.c                |  9
-rw-r--r--  arch/arm64/kvm/hyp/Makefile              |  1
-rw-r--r--  arch/arm64/kvm/hyp/switch.c              | 39
-rw-r--r--  arch/arm64/kvm/pmu.c                     | 38
-rw-r--r--  arch/arm64/mm/fault.c                    | 33
18 files changed, 249 insertions, 99 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ca9c175fb949..697ea0510729 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -476,16 +476,15 @@ config ARM64_ERRATUM_1024718
If unsure, say Y.
-config ARM64_ERRATUM_1188873
+config ARM64_ERRATUM_1418040
bool "Cortex-A76/Neoverse-N1: MRC read following MRRC read of specific Generic Timer in AArch32 might give incorrect result"
default y
depends on COMPAT
- select ARM_ARCH_TIMER_OOL_WORKAROUND
help
This option adds a workaround for ARM Cortex-A76/Neoverse-N1
- erratum 1188873.
+ errata 1188873 and 1418040.
- Affected Cortex-A76/Neoverse-N1 cores (r0p0, r1p0, r2p0) could
+ Affected Cortex-A76/Neoverse-N1 cores (r0p0 to r3p1) could
cause register corruption when accessing the timer registers
from AArch32 userspace.
@@ -521,6 +520,24 @@ config ARM64_ERRATUM_1286807
If unsure, say Y.
+config ARM64_ERRATUM_1463225
+ bool "Cortex-A76: Software Step might prevent interrupt recognition"
+ default y
+ help
+ This option adds a workaround for Arm Cortex-A76 erratum 1463225.
+
+ On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
+ of a system call instruction (SVC) can prevent recognition of
+ subsequent interrupts when software stepping is disabled in the
+ exception handler of the system call and either kernel debugging
+ is enabled or VHE is in use.
+
+ Work around the erratum by triggering a dummy step exception
+ when handling a system call from a task that is being stepped
+ in a VHE configuration of the kernel.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
@@ -1406,6 +1423,7 @@ config ARM64_MODULE_PLTS
config ARM64_PSEUDO_NMI
bool "Support for NMI-like interrupts"
+ depends on BROKEN # 1556553607-46531-1-git-send-email-julien.thierry@arm.com
select CONFIG_ARM_GIC_V3
help
Adds support for mimicking Non-Maskable Interrupts through the use of
diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
index defdc67d9ab4..33401ebc187c 100644
--- a/arch/arm64/include/asm/cpucaps.h
+++ b/arch/arm64/include/asm/cpucaps.h
@@ -53,7 +53,7 @@
#define ARM64_HAS_STAGE2_FWB 32
#define ARM64_HAS_CRC32 33
#define ARM64_SSBS 34
-#define ARM64_WORKAROUND_1188873 35
+#define ARM64_WORKAROUND_1418040 35
#define ARM64_HAS_SB 36
#define ARM64_WORKAROUND_1165522 37
#define ARM64_HAS_ADDRESS_AUTH_ARCH 38
@@ -62,7 +62,8 @@
#define ARM64_HAS_GENERIC_AUTH_IMP_DEF 41
#define ARM64_HAS_IRQ_PRIO_MASKING 42
#define ARM64_HAS_DCPODP 43
+#define ARM64_WORKAROUND_1463225 44
-#define ARM64_NCAPS 44
+#define ARM64_NCAPS 45
#endif /* __ASM_CPUCAPS_H */
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index ec894de0ed4e..87fdfba13a30 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -18,6 +18,7 @@
*/
#ifndef __ASM_INSN_H
#define __ASM_INSN_H
+#include <linux/build_bug.h>
#include <linux/types.h>
/* A64 instructions are always 32 bits. */
@@ -266,18 +267,23 @@ enum aarch64_insn_adr_type {
AARCH64_INSN_ADR_TYPE_ADR,
};
-#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
-static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
-{ return (code & (mask)) == (val); } \
-static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
-{ return (val); }
+#define __AARCH64_INSN_FUNCS(abbr, mask, val) \
+static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
+{ \
+ BUILD_BUG_ON(~(mask) & (val)); \
+ return (code & (mask)) == (val); \
+} \
+static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
+{ \
+ return (val); \
+}
__AARCH64_INSN_FUNCS(adr, 0x9F000000, 0x10000000)
__AARCH64_INSN_FUNCS(adrp, 0x9F000000, 0x90000000)
__AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg, 0x3FE0EC00, 0x38206800)
-__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0xB8200000)
+__AARCH64_INSN_FUNCS(ldadd, 0x3F20FC00, 0x38200000)
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)
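
Note: the BUILD_BUG_ON() added to __AARCH64_INSN_FUNCS() above enforces that an encoding is self-consistent: every bit set in the expected value must also be covered by the mask, which is exactly what the ldadd fix in the same hunk needed. A minimal standalone sketch of the check (plain C, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* mirrors BUILD_BUG_ON(~(mask) & (val)): a value bit outside the
     * mask means aarch64_insn_is_*() can never return true */
    static int insn_encoding_ok(uint32_t mask, uint32_t val)
    {
            return (~mask & val) == 0;
    }

    int main(void)
    {
            printf("old ldadd (0xB8200000): %s\n",
                   insn_encoding_ok(0x3F20FC00, 0xB8200000) ? "ok" : "broken");
            printf("new ldadd (0x38200000): %s\n",
                   insn_encoding_ok(0x3F20FC00, 0x38200000) ? "ok" : "broken");
            return 0;
    }

The old value had bit 31 set while the 0x3F20FC00 mask clears it, so the predicate was unsatisfiable; with 0x38200000 the size/ordering bits left free by the mask let one helper match the whole LDADD family.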
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2a8d3f8ca22c..4bcd9c1291d5 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -592,9 +592,6 @@ static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr);
void kvm_clr_pmu_events(u32 clr);
-void __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt);
-bool __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt);
-
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
#else
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h
index a65167f5cded..e8bcb9ecede9 100644
--- a/arch/arm64/include/asm/syscall.h
+++ b/arch/arm64/include/asm/syscall.h
@@ -20,7 +20,7 @@
#include <linux/compat.h>
#include <linux/err.h>
-typedef long (*syscall_fn_t)(struct pt_regs *regs);
+typedef long (*syscall_fn_t)(const struct pt_regs *regs);
extern const syscall_fn_t sys_call_table[];
diff --git a/arch/arm64/include/asm/syscall_wrapper.h b/arch/arm64/include/asm/syscall_wrapper.h
index a4477e515b79..507d0ee6bc69 100644
--- a/arch/arm64/include/asm/syscall_wrapper.h
+++ b/arch/arm64/include/asm/syscall_wrapper.h
@@ -30,10 +30,10 @@
} \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
-#define COMPAT_SYSCALL_DEFINE0(sname) \
- asmlinkage long __arm64_compat_sys_##sname(void); \
- ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
- asmlinkage long __arm64_compat_sys_##sname(void)
+#define COMPAT_SYSCALL_DEFINE0(sname) \
+ asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(__arm64_compat_sys_##sname, ERRNO); \
+ asmlinkage long __arm64_compat_sys_##sname(const struct pt_regs *__unused)
#define COND_SYSCALL_COMPAT(name) \
cond_syscall(__arm64_compat_sys_##name);
@@ -62,11 +62,11 @@
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#ifndef SYSCALL_DEFINE0
-#define SYSCALL_DEFINE0(sname) \
- SYSCALL_METADATA(_##sname, 0); \
- asmlinkage long __arm64_sys_##sname(void); \
- ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
- asmlinkage long __arm64_sys_##sname(void)
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(__arm64_sys_##sname, ERRNO); \
+ asmlinkage long __arm64_sys_##sname(const struct pt_regs *__unused)
#endif
#ifndef COND_SYSCALL
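
Note: after this change every generated entry point, including the zero-argument ones, has the uniform long (*)(const struct pt_regs *) shape. A hand-expanded sketch of roughly what the DEFINEx machinery produces for a one-argument syscall (names simplified; the real expansion goes through __MAP()/__SC_DECL and adds metadata, so this is not the literal macro output):

    asmlinkage long __arm64_sys_personality(const struct pt_regs *regs)
    {
            /* arguments come out of the trap frame rather than the C
             * calling convention, so the arity no longer leaks into
             * the function type */
            return __se_sys_personality(regs->regs[0]);
    }

Keeping one prototype for every slot is what lets the (syscall_fn_t) casts in sys.c and sys32.c below disappear.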
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
index e88d4e7bdfc7..d61beedba101 100644
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -502,6 +502,22 @@ static const struct midr_range arm64_ssb_cpus[] = {
{},
};
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static bool
+has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
+ int scope)
+{
+ u32 midr = read_cpuid_id();
+ /* Cortex-A76 r0p0 - r3p1 */
+ struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
+
+ WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
+ return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
+}
+#endif
+
static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
@@ -682,12 +698,16 @@ static const struct midr_range workaround_clean_cache[] = {
};
#endif
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-static const struct midr_range erratum_1188873_list[] = {
- /* Cortex-A76 r0p0 to r2p0 */
- MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
- /* Neoverse-N1 r0p0 to r2p0 */
- MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 2, 0),
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+/*
+ * - 1188873 affects r0p0 to r2p0
+ * - 1418040 affects r0p0 to r3p1
+ */
+static const struct midr_range erratum_1418040_list[] = {
+ /* Cortex-A76 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
+ /* Neoverse-N1 r0p0 to r3p1 */
+ MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
{},
};
#endif
@@ -809,11 +829,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
.matches = has_ssbd_mitigation,
.midr_range_list = arm64_ssb_cpus,
},
-#ifdef CONFIG_ARM64_ERRATUM_1188873
+#ifdef CONFIG_ARM64_ERRATUM_1418040
{
- .desc = "ARM erratum 1188873",
- .capability = ARM64_WORKAROUND_1188873,
- ERRATA_MIDR_RANGE_LIST(erratum_1188873_list),
+ .desc = "ARM erratum 1418040",
+ .capability = ARM64_WORKAROUND_1418040,
+ ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
@@ -824,6 +844,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
},
#endif
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+ {
+ .desc = "ARM erratum 1463225",
+ .capability = ARM64_WORKAROUND_1463225,
+ .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
+ .matches = has_cortex_a76_erratum_1463225,
+ },
+#endif
{
}
};
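
Note: the rNpM ranges above map onto MIDR_EL1 fields: N is the variant (bits [23:20]) and M the revision (bits [3:0]). A simplified standalone model of the range test (field layout per the architecture; the struct and helper names here are made up):

    #include <stdbool.h>
    #include <stdint.h>

    struct midr_range_sketch {
            uint32_t model;          /* implementer/arch/part, var+rev masked off */
            uint8_t  vr_min, vr_max; /* rNpM packed as 0xNM */
    };

    static bool midr_in_range_sketch(uint32_t midr,
                                     const struct midr_range_sketch *r)
    {
            uint32_t model = midr & 0xff0ffff0;  /* drop variant and revision */
            uint8_t  vr = (uint8_t)((((midr >> 20) & 0xf) << 4) | (midr & 0xf));

            return model == r->model && vr >= r->vr_min && vr <= r->vr_max;
    }

Packing variant and revision into one byte turns "r0p0 to r3p1" into a single closed interval: r2p5 (0x25) counts as affected, r3p2 (0x32) does not.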
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 1a7811b7e3c4..cd0c7af8e4a8 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -336,8 +336,8 @@ alternative_if ARM64_WORKAROUND_845719
alternative_else_nop_endif
#endif
3:
-#ifdef CONFIG_ARM64_ERRATUM_1188873
-alternative_if_not ARM64_WORKAROUND_1188873
+#ifdef CONFIG_ARM64_ERRATUM_1418040
+alternative_if_not ARM64_WORKAROUND_1418040
b 4f
alternative_else_nop_endif
/*
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index b09b6f75f759..06941c1fe418 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
/*
- * Randomize the module region over a 4 GB window covering the
+ * Randomize the module region over a 2 GB window covering the
* kernel. This reduces the risk of modules leaking information
* about the address of the kernel itself, but results in
* branches between modules and the core kernel that are
* resolved via PLTs. (Branches between modules will be
* resolved normally.)
*/
- module_range = SZ_4G - (u64)(_end - _stext);
- module_alloc_base = max((u64)_end + offset - SZ_4G,
+ module_range = SZ_2G - (u64)(_end - _stext);
+ module_alloc_base = max((u64)_end + offset - SZ_2G,
(u64)MODULES_VADDR);
} else {
/*
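
Note: the shrink from 4 GB to 2 GB keeps every randomized module within reach of a 32-bit place-relative reference (range +/-2 GB), which relative __ksymtab entries rely on. A standalone sketch of the clamp the hunk implements (the modules_vaddr argument stands in for MODULES_VADDR):

    #include <stdint.h>

    #define SZ_2G 0x80000000ULL

    /* slide a 2 GB window so it ends at the randomized kernel image,
     * but never let its base fall below the module VA space */
    static uint64_t pick_module_base(uint64_t kimg_end, uint64_t kaslr_offset,
                                     uint64_t modules_vaddr)
    {
            uint64_t base = kimg_end + kaslr_offset - SZ_2G;

            return base > modules_vaddr ? base : modules_vaddr;
    }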
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index f713e2fc4d75..dd080837e6a9 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
* can simply omit this fallback in that case.
*/
p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
- module_alloc_base + SZ_4G, GFP_KERNEL,
+ module_alloc_base + SZ_2G, GFP_KERNEL,
PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
__builtin_return_address(0));
@@ -96,16 +96,50 @@ static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
s64 sval = do_reloc(op, place, val);
+ /*
+ * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
+ * relative and absolute relocations as having a range of [-2^15, 2^16)
+ * or [-2^31, 2^32), respectively. However, in order to be able to
+ * detect overflows reliably, we have to choose whether we interpret
+ * such quantities as signed or as unsigned, and stick with it.
+ * The way we organize our address space requires a signed
+ * interpretation of 32-bit relative references, so let's use that
+ * for all R_AARCH64_PRELxx relocations. This means our upper
+ * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
+ */
+
switch (len) {
case 16:
*(s16 *)place = sval;
- if (sval < S16_MIN || sval > U16_MAX)
- return -ERANGE;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U16_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S16_MIN || sval > S16_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 16-bit data relocation (%d)\n", op);
+ return 0;
+ }
break;
case 32:
*(s32 *)place = sval;
- if (sval < S32_MIN || sval > U32_MAX)
- return -ERANGE;
+ switch (op) {
+ case RELOC_OP_ABS:
+ if (sval < 0 || sval > U32_MAX)
+ return -ERANGE;
+ break;
+ case RELOC_OP_PREL:
+ if (sval < S32_MIN || sval > S32_MAX)
+ return -ERANGE;
+ break;
+ default:
+ pr_err("Invalid 32-bit data relocation (%d)\n", op);
+ return 0;
+ }
break;
case 64:
*(s64 *)place = sval;
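
Note: to see why one shared window was not enough: the old test accepted anything in [-2^31, 2^32), so a place-relative offset that only "fits" when read back unsigned would pass the check and then come back negative. A small standalone demonstration of the 32-bit case (two's complement truncation assumed):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int64_t sval = 3LL << 30;       /* a +3 GB place-relative offset */
            int32_t slot = (int32_t)sval;   /* what the relocation slot stores */

            int old_ok = sval >= INT32_MIN && sval <= (int64_t)UINT32_MAX;
            int new_ok = sval >= INT32_MIN && sval <= INT32_MAX;  /* PREL rule */

            /* prints: old=1 new=0 stored=-1073741824 */
            printf("old=%d new=%d stored=%" PRId32 "\n", old_ok, new_ok, slot);
            return 0;
    }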
diff --git a/arch/arm64/kernel/sys.c b/arch/arm64/kernel/sys.c
index 6f91e8116514..fe20c461582a 100644
--- a/arch/arm64/kernel/sys.c
+++ b/arch/arm64/kernel/sys.c
@@ -47,22 +47,26 @@ SYSCALL_DEFINE1(arm64_personality, unsigned int, personality)
return ksys_personality(personality);
}
+asmlinkage long sys_ni_syscall(void);
+
+asmlinkage long __arm64_sys_ni_syscall(const struct pt_regs *__unused)
+{
+ return sys_ni_syscall();
+}
+
/*
* Wrappers to pass the pt_regs argument.
*/
-#define sys_personality sys_arm64_personality
-
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall sys_ni_syscall
+#define __arm64_sys_personality __arm64_sys_arm64_personality
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t sys_call_table[__NR_syscalls] = {
- [0 ... __NR_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+ [0 ... __NR_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd.h>
};
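
Note: __arm64_sys_ni_syscall is now a real pt_regs-taking wrapper rather than a cast, so every initializer in the table already has type syscall_fn_t. A hand-reduced sketch of the resulting table shape (hypothetical size and entry; the real bounds come from <asm/unistd.h>):

    struct pt_regs;
    typedef long (*syscall_fn_t)(const struct pt_regs *regs);

    long __arm64_sys_ni_syscall(const struct pt_regs *unused);
    long __arm64_sys_io_setup(const struct pt_regs *regs);

    /* GNU range initializer, as in the real table: fill with the
     * not-implemented stub, then let generated entries override */
    const syscall_fn_t example_table[443] = {
            [0 ... 442] = __arm64_sys_ni_syscall,
            [0] = __arm64_sys_io_setup,
    };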
diff --git a/arch/arm64/kernel/sys32.c b/arch/arm64/kernel/sys32.c
index 0f8bcb7de700..3c80a40c1c9d 100644
--- a/arch/arm64/kernel/sys32.c
+++ b/arch/arm64/kernel/sys32.c
@@ -133,17 +133,14 @@ COMPAT_SYSCALL_DEFINE6(aarch32_fallocate, int, fd, int, mode,
return ksys_fallocate(fd, mode, arg_u64(offset), arg_u64(len));
}
-asmlinkage long sys_ni_syscall(const struct pt_regs *);
-#define __arm64_sys_ni_syscall sys_ni_syscall
-
#undef __SYSCALL
#define __SYSCALL(nr, sym) asmlinkage long __arm64_##sym(const struct pt_regs *);
#include <asm/unistd32.h>
#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = (syscall_fn_t)__arm64_##sym,
+#define __SYSCALL(nr, sym) [nr] = __arm64_##sym,
const syscall_fn_t compat_sys_call_table[__NR_compat_syscalls] = {
- [0 ... __NR_compat_syscalls - 1] = (syscall_fn_t)sys_ni_syscall,
+ [0 ... __NR_compat_syscalls - 1] = __arm64_sys_ni_syscall,
#include <asm/unistd32.h>
};
diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
index 5610ac01c1ec..871c739f060a 100644
--- a/arch/arm64/kernel/syscall.c
+++ b/arch/arm64/kernel/syscall.c
@@ -8,6 +8,7 @@
#include <linux/syscalls.h>
#include <asm/daifflags.h>
+#include <asm/debug-monitors.h>
#include <asm/fpsimd.h>
#include <asm/syscall.h>
#include <asm/thread_info.h>
@@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static void cortex_a76_erratum_1463225_svc_handler(void)
+{
+ u32 reg, val;
+
+ if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
+ return;
+
+ if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
+ return;
+
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
+ reg = read_sysreg(mdscr_el1);
+ val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
+ write_sysreg(val, mdscr_el1);
+ asm volatile("msr daifclr, #8");
+ isb();
+
+ /* We will have taken a single-step exception by this point */
+
+ write_sysreg(reg, mdscr_el1);
+ __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
+}
+#else
+static void cortex_a76_erratum_1463225_svc_handler(void) { }
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
const syscall_fn_t syscall_table[])
{
@@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
regs->orig_x0 = regs->regs[0];
regs->syscallno = scno;
+ cortex_a76_erratum_1463225_svc_handler();
local_daif_restore(DAIF_PROCCTX);
user_exit();
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index ade32046f3fe..177c0f6ebabf 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -168,7 +168,6 @@ void show_stack(struct task_struct *tsk, unsigned long *sp)
static int __die(const char *str, int err, struct pt_regs *regs)
{
- struct task_struct *tsk = current;
static int die_counter;
int ret;
@@ -181,9 +180,6 @@ static int __die(const char *str, int err, struct pt_regs *regs)
return ret;
print_modules();
- pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
- TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk),
- end_of_stack(tsk));
show_regs(regs);
if (!user_mode(regs))
@@ -256,7 +252,10 @@ void arm64_force_sig_fault(int signo, int code, void __user *addr,
const char *str)
{
arm64_show_signal(signo, str);
- force_sig_fault(signo, code, addr, current);
+ if (signo == SIGKILL)
+ force_sig(SIGKILL, current);
+ else
+ force_sig_fault(signo, code, addr, current);
}
void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 82d1904328ad..ea710f674cb6 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -10,6 +10,7 @@ KVM=../../../../virt/kvm
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-cpuif-proxy.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 22b4c335e0b2..8799e0c267d4 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -16,6 +16,7 @@
*/
#include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>
@@ -505,6 +506,44 @@ static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
#endif
}
+/**
+ * Disable host events, enable guest events
+ */
+static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ if (pmu->events_host)
+ write_sysreg(pmu->events_host, pmcntenclr_el0);
+
+ if (pmu->events_guest)
+ write_sysreg(pmu->events_guest, pmcntenset_el0);
+
+ return (pmu->events_host || pmu->events_guest);
+}
+
+/**
+ * Disable guest events, enable host events
+ */
+static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
+{
+ struct kvm_host_data *host;
+ struct kvm_pmu_events *pmu;
+
+ host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ pmu = &host->pmu_events;
+
+ if (pmu->events_guest)
+ write_sysreg(pmu->events_guest, pmcntenclr_el0);
+
+ if (pmu->events_host)
+ write_sysreg(pmu->events_host, pmcntenset_el0);
+}
+
/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index 3da94a5bb6b7..e71d00bb5271 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -53,44 +53,6 @@ void kvm_clr_pmu_events(u32 clr)
ctx->pmu_events.events_guest &= ~clr;
}
-/**
- * Disable host events, enable guest events
- */
-bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
-{
- struct kvm_host_data *host;
- struct kvm_pmu_events *pmu;
-
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
- pmu = &host->pmu_events;
-
- if (pmu->events_host)
- write_sysreg(pmu->events_host, pmcntenclr_el0);
-
- if (pmu->events_guest)
- write_sysreg(pmu->events_guest, pmcntenset_el0);
-
- return (pmu->events_host || pmu->events_guest);
-}
-
-/**
- * Disable guest events, enable host events
- */
-void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
-{
- struct kvm_host_data *host;
- struct kvm_pmu_events *pmu;
-
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
- pmu = &host->pmu_events;
-
- if (pmu->events_guest)
- write_sysreg(pmu->events_guest, pmcntenclr_el0);
-
- if (pmu->events_host)
- write_sysreg(pmu->events_host, pmcntenset_el0);
-}
-
#define PMEVTYPER_READ_CASE(idx) \
case idx: \
return read_sysreg(pmevtyper##idx##_el0)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index dda234bcc020..a30818ed9c60 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -811,6 +811,36 @@ void __init hook_debug_fault_code(int nr,
debug_fault_info[nr].name = name;
}
+#ifdef CONFIG_ARM64_ERRATUM_1463225
+DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
+
+static int __exception
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ return 0;
+
+ if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
+ return 0;
+
+ /*
+ * We've taken a dummy step exception from the kernel to ensure
+ * that interrupts are re-enabled on the syscall path. Return back
+ * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
+ * masked so that we can safely restore the mdscr and get on with
+ * handling the syscall.
+ */
+ regs->pstate |= PSR_D_BIT;
+ return 1;
+}
+#else
+static int __exception
+cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM64_ERRATUM_1463225 */
+
asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
unsigned int esr,
struct pt_regs *regs)
@@ -818,6 +848,9 @@ asmlinkage void __exception do_debug_exception(unsigned long addr_if_watchpoint,
const struct fault_info *inf = esr_to_debug_fault_info(esr);
unsigned long pc = instruction_pointer(regs);
+ if (cortex_a76_erratum_1463225_debug_handler(regs))
+ return;
+
/*
* Tell lockdep we disabled irqs in entry.S. Do nothing if they were
* already disabled to preserve the last enabled/disabled addresses.
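
Note: putting the two halves of the 1463225 workaround together: the SVC path in syscall.c arms a per-CPU flag and deliberately raises a step exception, and the handler above consumes it and sets PSTATE.D so no further debug exception fires while the SVC path restores MDSCR_EL1. A stripped-down model of that handshake (plain C, system register accesses elided into comments):

    #include <stdbool.h>
    #include <stdio.h>

    static bool in_1463225_wa;  /* stands in for the per-CPU flag */

    /* fault.c side: consume the dummy step, leave genuine steps alone */
    static bool debug_handler(void)
    {
            if (!in_1463225_wa)
                    return false;  /* real step exception: normal handling */
            return true;           /* dummy step: "set PSTATE.D", return early */
    }

    /* syscall.c side: force one step exception while the flag is armed */
    static void svc_handler(void)
    {
            bool consumed;

            in_1463225_wa = true;
            /* kernel: MDSCR_EL1 |= SS|KDE, unmask D -> step fires here */
            consumed = debug_handler();
            in_1463225_wa = false;
            /* kernel: restore MDSCR_EL1; IRQs are reliably recognised again */
            printf("dummy step consumed: %s\n", consumed ? "yes" : "no");
    }

    int main(void)
    {
            svc_handler();
            return 0;
    }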