author		Andrew Donnellan <ajd@linux.ibm.com>	2019-12-09 14:22:21 +0100
committer	Michael Ellerman <mpe@ellerman.id.au>	2019-12-16 13:19:44 +0100
commit		61e3acd8c693a14fc69b824cb5b08d02cb90a6e7 (patch)
tree		bdc47c5f24399173d911615fb312fd0be8fa4a53 /arch/powerpc
parent		powerpc/pseries/cmm: fix managed page counts when migrating pages between zones (diff)
powerpc: Fix __clear_user() with KUAP enabled
The KUAP implementation adds calls in clear_user() to enable and disable access to userspace memory. However, it doesn't add these to __clear_user(), which is used in the ptrace regset code.

As there's only one direct user of __clear_user() (the regset code), and the time taken to set the AMR for KUAP purposes is going to dominate the cost of a quick access_ok(), there's not much point having a separate path.

Rename __clear_user() to __arch_clear_user(), and make __clear_user() just call clear_user().

Reported-by: syzbot+f25ecf4b2982d8c7a640@syzkaller-ppc64.appspotmail.com
Reported-by: Daniel Axtens <dja@axtens.net>
Suggested-by: Michael Ellerman <mpe@ellerman.id.au>
Fixes: de78a9c42a79 ("powerpc: Add a framework for Kernel Userspace Access Protection")
Signed-off-by: Andrew Donnellan <ajd@linux.ibm.com>
[mpe: Use __arch_clear_user() for the asm version like arm64 & nds32]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20191209132221.15328-1-ajd@linux.ibm.com
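A minimal sketch of the resulting behaviour, condensed from the uaccess.h hunk below; the comments are explanatory additions and not part of the patch:

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;

	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);	/* open the KUAP user access window */
		ret = __arch_clear_user(addr, size);	/* renamed asm implementation */
		prevent_write_to_user(addr, size);	/* close the window again */
	}
	return ret;
}

/* __clear_user() no longer bypasses the KUAP handling; it funnels into the
 * guarded path above, so its one direct user (the regset code) stops
 * faulting when KUAP is enabled. */
static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}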
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/include/asm/uaccess.h	9
-rw-r--r--	arch/powerpc/lib/string_32.S		4
-rw-r--r--	arch/powerpc/lib/string_64.S		6
3 files changed, 12 insertions, 7 deletions
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 15002b51ff18..c92fe7fe9692 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -401,7 +401,7 @@ copy_to_user_mcsafe(void __user *to, const void *from, unsigned long n)
return n;
}
-extern unsigned long __clear_user(void __user *addr, unsigned long size);
+unsigned long __arch_clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
@@ -409,12 +409,17 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
might_fault();
if (likely(access_ok(addr, size))) {
allow_write_to_user(addr, size);
- ret = __clear_user(addr, size);
+ ret = __arch_clear_user(addr, size);
prevent_write_to_user(addr, size);
}
return ret;
}
+static inline unsigned long __clear_user(void __user *addr, unsigned long size)
+{
+ return clear_user(addr, size);
+}
+
extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);
diff --git a/arch/powerpc/lib/string_32.S b/arch/powerpc/lib/string_32.S
index f69a6aab7bfb..1ddb26394e8a 100644
--- a/arch/powerpc/lib/string_32.S
+++ b/arch/powerpc/lib/string_32.S
@@ -17,7 +17,7 @@ CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
-_GLOBAL(__clear_user)
+_GLOBAL(__arch_clear_user)
/*
* Use dcbz on the complete cache lines in the destination
* to set them to zero. This requires that the destination
@@ -87,4 +87,4 @@ _GLOBAL(__clear_user)
EX_TABLE(8b, 91b)
EX_TABLE(9b, 91b)
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
diff --git a/arch/powerpc/lib/string_64.S b/arch/powerpc/lib/string_64.S
index 507b18b1660e..169872bc0892 100644
--- a/arch/powerpc/lib/string_64.S
+++ b/arch/powerpc/lib/string_64.S
@@ -17,7 +17,7 @@ PPC64_CACHES:
.section ".text"
/**
- * __clear_user: - Zero a block of memory in user space, with less checking.
+ * __arch_clear_user: - Zero a block of memory in user space, with less checking.
* @to: Destination address, in user space.
* @n: Number of bytes to zero.
*
@@ -58,7 +58,7 @@ err3; stb r0,0(r3)
mr r3,r4
blr
-_GLOBAL_TOC(__clear_user)
+_GLOBAL_TOC(__arch_clear_user)
cmpdi r4,32
neg r6,r3
li r0,0
@@ -181,4 +181,4 @@ err1; dcbz 0,r3
cmpdi r4,32
blt .Lshort_clear
b .Lmedium_clear
-EXPORT_SYMBOL(__clear_user)
+EXPORT_SYMBOL(__arch_clear_user)
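From a caller's point of view nothing changes at the call sites: code that used the unchecked variant directly, as the regset code does, now picks up the access_ok() check and the KUAP bracketing automatically. A hypothetical call site for illustration (the helper name is made up, not the actual regset function):

/* Zero a range of a user buffer; __clear_user() returns the number of
 * bytes that could not be cleared, so any non-zero result is a fault. */
static int zero_user_range(void __user *ubuf, unsigned long len)
{
	if (__clear_user(ubuf, len))
		return -EFAULT;
	return 0;
}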