author		Peter Collingbourne <pcc@google.com>		2021-03-19 04:10:52 +0100
committer	Catalin Marinas <catalin.marinas@arm.com>	2021-04-13 18:31:44 +0200
commit		2f79d2fc391e4ba64df908b8c07dda6c3a907056
tree		c1825e80d5a12fe7d89772a64acde6abaf0f9c01
parent		kasan, arm64: tests supports for HW_TAGS async mode
arm64: mte: make the per-task SCTLR_EL1 field usable elsewhere
In an upcoming change we are going to introduce per-task SCTLR_EL1
bits for PAC. Move the existing per-task SCTLR_EL1 field out of the
MTE-specific code so that we will be able to use it from both the
PAC and MTE code paths and make the task switching code more efficient.
Signed-off-by: Peter Collingbourne <pcc@google.com>
Link: https://linux-review.googlesource.com/id/Ic65fac78a7926168fa68f9e8da591c9e04ff7278
Link: https://lore.kernel.org/r/13d725cb8e741950fb9d6e64b2cd9bd54ff7c3f9.1616123271.git.pcc@google.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-rw-r--r--	arch/arm64/include/asm/processor.h |  6
-rw-r--r--	arch/arm64/kernel/mte.c            | 49
-rw-r--r--	arch/arm64/kernel/process.c        | 24
3 files changed, 39 insertions, 40 deletions
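
The efficiency gain mentioned in the commit message comes from hoisting the skip-if-unchanged check out of mte_thread_switch() and into __switch_to(): a single per-task sctlr_user shadow now guards the expensive SCTLR_EL1 write for every feature that owns bits in SCTLR_USER_MASK. The stand-alone C model below sketches that shadow-register pattern; it is illustrative only (all names are hypothetical, and a plain variable stands in for the system register):

#include <stdint.h>
#include <stdio.h>

static uint64_t hw_sctlr_el1;   /* stands in for the real system register */
static unsigned int hw_writes;  /* counts simulated register writes */

struct task {
        uint64_t sctlr_user;    /* per-task shadow, as in thread_struct */
};

static void update_sctlr_el1(uint64_t sctlr)
{
        hw_sctlr_el1 = sctlr;   /* the "expensive" operation */
        hw_writes++;
}

/* Mirrors the check added to __switch_to(): write only on change. */
static void switch_to(struct task *prev, struct task *next)
{
        if (prev->sctlr_user != next->sctlr_user)
                update_sctlr_el1(next->sctlr_user);
}

int main(void)
{
        struct task a = { .sctlr_user = 1 };
        struct task b = { .sctlr_user = 1 };    /* same settings as a */
        struct task c = { .sctlr_user = 2 };    /* different settings */

        switch_to(&a, &b);      /* no register write */
        switch_to(&b, &c);      /* one register write */
        printf("register writes: %u (expected 1)\n", hw_writes);
        return 0;
}

Switching between tasks with identical settings never touches the simulated register; only the b-to-c switch pays for a write, which is the behaviour the patch gives the real context-switch path.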
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index ca2cd75d3286..80895bb30490 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -151,11 +151,13 @@ struct thread_struct {
 	struct ptrauth_keys_kernel	keys_kernel;
 #endif
 #ifdef CONFIG_ARM64_MTE
-	u64			sctlr_tcf0;
 	u64			gcr_user_excl;
 #endif
+	u64			sctlr_user;
 };
 
+#define SCTLR_USER_MASK SCTLR_EL1_TCF0_MASK
+
 static inline void arch_thread_struct_whitelist(unsigned long *offset,
 						unsigned long *size)
 {
@@ -247,6 +249,8 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
+void set_task_sctlr_el1(u64 sctlr);
+
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
 					 struct task_struct *next);
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index 820bad94870e..8da597e41795 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -185,26 +185,6 @@ void mte_check_tfsr_el1(void)
 }
 #endif
 
-static void update_sctlr_el1_tcf0(u64 tcf0)
-{
-	/* ISB required for the kernel uaccess routines */
-	sysreg_clear_set(sctlr_el1, SCTLR_EL1_TCF0_MASK, tcf0);
-	isb();
-}
-
-static void set_sctlr_el1_tcf0(u64 tcf0)
-{
-	/*
-	 * mte_thread_switch() checks current->thread.sctlr_tcf0 as an
-	 * optimisation. Disable preemption so that it does not see
-	 * the variable update before the SCTLR_EL1.TCF0 one.
-	 */
-	preempt_disable();
-	current->thread.sctlr_tcf0 = tcf0;
-	update_sctlr_el1_tcf0(tcf0);
-	preempt_enable();
-}
-
 static void update_gcr_el1_excl(u64 excl)
 {
 
@@ -237,31 +217,22 @@ void flush_mte_state(void)
 	write_sysreg_s(0, SYS_TFSRE0_EL1);
 	clear_thread_flag(TIF_MTE_ASYNC_FAULT);
 	/* disable tag checking */
-	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
+	set_task_sctlr_el1((current->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK) |
+			   SCTLR_EL1_TCF0_NONE);
 	/* reset tag generation mask */
 	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }
 
 void mte_thread_switch(struct task_struct *next)
 {
-	if (!system_supports_mte())
-		return;
-
-	/* avoid expensive SCTLR_EL1 accesses if no change */
-	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
-		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-	else
-		isb();
-
 	/*
 	 * Check if an async tag exception occurred at EL1.
 	 *
 	 * Note: On the context switch path we rely on the dsb() present
 	 * in __switch_to() to guarantee that the indirect writes to TFSR_EL1
 	 * are synchronized before this point.
-	 * isb() above is required for the same reason.
-	 *
 	 */
+	isb();
 	mte_check_tfsr_el1();
 }
 
@@ -291,7 +262,7 @@ void mte_suspend_exit(void)
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
-	u64 tcf0;
+	u64 sctlr = task->thread.sctlr_user & ~SCTLR_EL1_TCF0_MASK;
 	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
 		       SYS_GCR_EL1_EXCL_MASK;
 
@@ -300,23 +271,23 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
 	switch (arg & PR_MTE_TCF_MASK) {
 	case PR_MTE_TCF_NONE:
-		tcf0 = SCTLR_EL1_TCF0_NONE;
+		sctlr |= SCTLR_EL1_TCF0_NONE;
 		break;
 	case PR_MTE_TCF_SYNC:
-		tcf0 = SCTLR_EL1_TCF0_SYNC;
+		sctlr |= SCTLR_EL1_TCF0_SYNC;
 		break;
 	case PR_MTE_TCF_ASYNC:
-		tcf0 = SCTLR_EL1_TCF0_ASYNC;
+		sctlr |= SCTLR_EL1_TCF0_ASYNC;
 		break;
 	default:
 		return -EINVAL;
 	}
 
 	if (task != current) {
-		task->thread.sctlr_tcf0 = tcf0;
+		task->thread.sctlr_user = sctlr;
 		task->thread.gcr_user_excl = gcr_excl;
 	} else {
-		set_sctlr_el1_tcf0(tcf0);
+		set_task_sctlr_el1(sctlr);
 		set_gcr_el1_excl(gcr_excl);
 	}
 
@@ -333,7 +304,7 @@ long get_mte_ctrl(struct task_struct *task)
 
 	ret = incl << PR_MTE_TAG_SHIFT;
 
-	switch (task->thread.sctlr_tcf0) {
+	switch (task->thread.sctlr_user & SCTLR_EL1_TCF0_MASK) {
 	case SCTLR_EL1_TCF0_NONE:
 		ret |= PR_MTE_TCF_NONE;
 		break;
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 325c83b1a24d..de0ab08cb260 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -529,6 +529,27 @@ static void erratum_1418040_thread_switch(struct task_struct *prev,
 	write_sysreg(val, cntkctl_el1);
 }
 
+static void update_sctlr_el1(u64 sctlr)
+{
+	sysreg_clear_set(sctlr_el1, SCTLR_USER_MASK, sctlr);
+
+	/* ISB required for the kernel uaccess routines when setting TCF0. */
+	isb();
+}
+
+void set_task_sctlr_el1(u64 sctlr)
+{
+	/*
+	 * __switch_to() checks current->thread.sctlr as an
+	 * optimisation. Disable preemption so that it does not see
+	 * the variable update before the SCTLR_EL1 one.
+	 */
+	preempt_disable();
+	current->thread.sctlr_user = sctlr;
+	update_sctlr_el1(sctlr);
+	preempt_enable();
+}
+
 /*
  * Thread switching.
  */
@@ -559,6 +580,9 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	 * registers.
 	 */
 	mte_thread_switch(next);
+	/* avoid expensive SCTLR_EL1 accesses if no change */
+	if (prev->thread.sctlr_user != next->thread.sctlr_user)
+		update_sctlr_el1(next->thread.sctlr_user);
 
 	/* the actual thread switch */
 	last = cpu_switch_to(prev, next);
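
The rewritten set_mte_ctrl() remains the backend of the PR_SET_TAGGED_ADDR_CTRL prctl: the requested tag-check fault mode is folded into thread.sctlr_user and, for the current task, applied immediately through set_task_sctlr_el1(). A minimal user-space sketch of exercising that path follows, assuming an MTE-capable arm64 CPU and kernel (the call fails with EINVAL elsewhere); the fallback #defines are only needed where the libc headers predate MTE:

#include <stdio.h>
#include <sys/prctl.h>

/* Fallbacks for older headers; values match include/uapi/linux/prctl.h. */
#ifndef PR_MTE_TCF_SYNC
#define PR_SET_TAGGED_ADDR_CTRL 55
#define PR_GET_TAGGED_ADDR_CTRL 56
#define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
#define PR_MTE_TCF_SYNC         (1UL << 1)
#define PR_MTE_TAG_SHIFT        3
#endif

int main(void)
{
        /*
         * Request synchronous tag-check faults and include tags 1-15 in
         * random tag generation (tag 0 excluded). In the kernel this is
         * parsed by set_mte_ctrl(), which composes thread.sctlr_user and,
         * because task == current, applies it via set_task_sctlr_el1().
         */
        unsigned long ctrl = PR_TAGGED_ADDR_ENABLE | PR_MTE_TCF_SYNC |
                             (0xfffeUL << PR_MTE_TAG_SHIFT);

        if (prctl(PR_SET_TAGGED_ADDR_CTRL, ctrl, 0, 0, 0)) {
                perror("PR_SET_TAGGED_ADDR_CTRL");
                return 1;
        }
        /* get_mte_ctrl() reads the mode back out of sctlr_user. */
        printf("tagged addr ctrl: 0x%lx\n",
               (unsigned long)prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
        return 0;
}

Because set_task_sctlr_el1() updates the shadow variable and the register under preempt_disable(), a context switch can never observe sctlr_user ahead of the actual SCTLR_EL1 value, which is exactly the invariant the new comparison in __switch_to() relies on.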