author		Andre Przywara <andre.przywara@arm.com>	2016-10-19 15:40:54 +0200
committer	Will Deacon <will.deacon@arm.com>	2016-10-20 10:50:49 +0200
commit		87261d19046aeaeed8eb3d2793fde850ae1b5c9e (patch)
tree		5e27605b4c18c29fbc1307d92bc1ef30e4ab6883 /arch/arm64/kernel/traps.c
parent		arm64: percpu: rewrite ll/sc loops in assembly (diff)
download	linux-87261d19046aeaeed8eb3d2793fde850ae1b5c9e.tar.xz
		linux-87261d19046aeaeed8eb3d2793fde850ae1b5c9e.zip
arm64: Cortex-A53 errata workaround: check for kernel addresses
Commit 7dd01aef0557 ("arm64: trap userspace "dc cvau" cache operation on errata-affected core") adds code to execute cache maintenance instructions in the kernel on behalf of userland on CPUs with certain ARM CPU errata. It turns out that the address hasn't been checked to be a valid user space address, allowing userland to clean cache lines in kernel space.

Fix this by introducing an address check before executing the instructions on behalf of userland.

Since the address doesn't come via a syscall parameter, we can't just reject tagged pointers and instead have to remove the tag when checking against the user address limit.

Cc: <stable@vger.kernel.org>
Fixes: 7dd01aef0557 ("arm64: trap userspace "dc cvau" cache operation on errata-affected core")
Reported-by: Kristina Martsenko <kristina.martsenko@arm.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
[will: rework commit message + replace access_ok with max_user_addr()]
Signed-off-by: Will Deacon <will.deacon@arm.com>
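For readers unfamiliar with AArch64 tagged pointers, the following is a minimal, standalone user-space sketch of the check this patch adds, not the kernel code itself: the tag in bits 63:56 is stripped by sign-extending from bit 55 before the address is compared against the user address limit, which is what the kernel's untagged_addr() and user_addr_max() provide. The names untag_address(), EXAMPLE_USER_ADDR_MAX and check_cache_maint_address() are made up for illustration, and the 48-bit limit is an assumed example value.

/*
 * Illustration only (not kernel code): strip the AArch64 pointer tag
 * before validating the address, as the patch above does with
 * untagged_addr() and user_addr_max().
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Strip the tag by sign-extending from bit 55 (the effect of untagged_addr()). */
static inline uint64_t untag_address(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

/* Assumed user address limit: a 48-bit virtual address split. */
#define EXAMPLE_USER_ADDR_MAX	(1ULL << 48)

/* Refuse kernel addresses before doing cache maintenance on the caller's behalf. */
static int check_cache_maint_address(uint64_t addr)
{
	if (untag_address(addr) >= EXAMPLE_USER_ADDR_MAX)
		return -EFAULT;
	return 0;
}

int main(void)
{
	uint64_t tagged_user = 0x2a00007fdeadbeefULL;	/* tag 0x2a on a user VA */
	uint64_t kernel_va   = 0xffff000008080000ULL;	/* typical kernel VA */

	printf("tagged user address: %d\n", check_cache_maint_address(tagged_user));
	printf("kernel address:      %d\n", check_cache_maint_address(kernel_va));
	return 0;
}

With such a check in place, a tagged user pointer is still accepted once the tag is removed, while a kernel virtual address yields -EFAULT instead of having the kernel operate on its own cache lines.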
Diffstat (limited to 'arch/arm64/kernel/traps.c')
-rw-r--r--	arch/arm64/kernel/traps.c	| 27 +++++++++++++++------------
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5ff020f8fb7f..7255c9d6cfb7 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -434,18 +434,21 @@ void cpu_enable_cache_maint_trap(void *__unused)
 }
 
 #define __user_cache_maint(insn, address, res)			\
-	asm volatile (						\
-		"1:	" insn ", %1\n"				\
-		"	mov	%w0, #0\n"			\
-		"2:\n"						\
-		"	.pushsection .fixup,\"ax\"\n"		\
-		"	.align	2\n"				\
-		"3:	mov	%w0, %w2\n"			\
-		"	b	2b\n"				\
-		"	.popsection\n"				\
-		_ASM_EXTABLE(1b, 3b)				\
-		: "=r" (res)					\
-		: "r" (address), "i" (-EFAULT) )
+	if (untagged_addr(address) >= user_addr_max())		\
+		res = -EFAULT;					\
+	else							\
+		asm volatile (					\
+			"1:	" insn ", %1\n"			\
+			"	mov	%w0, #0\n"		\
+			"2:\n"					\
+			"	.pushsection .fixup,\"ax\"\n"	\
+			"	.align	2\n"			\
+			"3:	mov	%w0, %w2\n"		\
+			"	b	2b\n"			\
+			"	.popsection\n"			\
+			_ASM_EXTABLE(1b, 3b)			\
+			: "=r" (res)				\
+			: "r" (address), "i" (-EFAULT) )
 
 static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
 {