author    Linus Torvalds <torvalds@linux-foundation.org>  2017-05-01 23:41:04 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-05-01 23:41:04 +0200
commit    5db6db0d400edd8bec274e34960cfa22838e1df5 (patch)
tree      3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/powerpc
parent    Merge tag 'devprop-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git... (diff)
parent    HAVE_ARCH_HARDENED_USERCOPY is unconditional now (diff)
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle.

  This one mostly takes copy_from_user() and friends out of arch/* and
  gets the zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be a
  pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
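For reference, the shape of the RAW_COPY_USER model this series converges on: each architecture supplies only raw_copy_{to,from}_user(), returning the number of bytes left uncopied, while the access_ok() check, hardened-usercopy checks, and zero-padding of the uncopied tail live in common code. A simplified C sketch of that generic wrapper (generic_copy_from_user is a hypothetical name for illustration; the real common-code helper differs in detail):

	/*
	 * Sketch of the generic copy_from_user() shape this series moves
	 * into common code.  The arch primitive raw_copy_from_user()
	 * returns the number of bytes it could NOT copy; the generic
	 * layer checks the range and zero-pads the tail of a partial copy.
	 */
	static inline unsigned long
	generic_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		might_fault();
		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* zero the uncopied tail */
		return res;
	}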
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig                |  1
-rw-r--r--  arch/powerpc/include/asm/extable.h  | 29
-rw-r--r--  arch/powerpc/include/asm/uaccess.h  | 96
-rw-r--r--  arch/powerpc/lib/Makefile           |  2
-rw-r--r--  arch/powerpc/lib/copy_32.S          | 14
-rw-r--r--  arch/powerpc/lib/copyuser_64.S      | 35
-rw-r--r--  arch/powerpc/lib/usercopy_64.c      | 41
7 files changed, 43 insertions(+), 175 deletions(-)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 97a8bc8a095c..053382616533 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -117,7 +117,6 @@ config PPC
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL_OLD
select HAVE_ARCH_AUDITSYSCALL
- select HAVE_ARCH_HARDENED_USERCOPY
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
diff --git a/arch/powerpc/include/asm/extable.h b/arch/powerpc/include/asm/extable.h
new file mode 100644
index 000000000000..07cc45cd86d9
--- /dev/null
+++ b/arch/powerpc/include/asm/extable.h
@@ -0,0 +1,29 @@
+#ifndef _ARCH_POWERPC_EXTABLE_H
+#define _ARCH_POWERPC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative addresses: the first is
+ * the address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out what
+ * to do.
+ *
+ * All the routines below use bits of fixup code that are out of line with the
+ * main instruction path. This means when everything is well, we don't even
+ * have to jump over them. Further, they do not intrude on our cache or tlb
+ * entries.
+ */
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+struct exception_table_entry {
+ int insn;
+ int fixup;
+};
+
+static inline unsigned long extable_fixup(const struct exception_table_entry *x)
+{
+ return (unsigned long)&x->fixup + x->fixup;
+}
+
+#endif
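A note on the relative-extable layout introduced above: storing 32-bit self-relative offsets instead of absolute addresses halves the table size on 64-bit and keeps it position-independent. A hedged sketch of the other half of the machinery, i.e. how a trap handler would consume an entry (extable_insn mirrors the extable_fixup in the header; fixup_exception_sketch is a simplified illustration, not the actual powerpc fault code):

	/* Resolve the relative 'insn' field back to an absolute address. */
	static inline unsigned long
	extable_insn(const struct exception_table_entry *x)
	{
		return (unsigned long)&x->insn + x->insn;
	}

	/*
	 * Simplified picture of fixup: if the faulting PC has an extable
	 * entry, resume execution at its fixup address instead of oopsing.
	 */
	static int fixup_exception_sketch(struct pt_regs *regs)
	{
		const struct exception_table_entry *e;

		e = search_exception_tables(regs->nip);	/* nip = faulting PC */
		if (!e)
			return 0;
		regs->nip = extable_fixup(e);
		return 1;
	}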
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 0e6add3187bc..5c0d8a8cdae5 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -1,18 +1,11 @@
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
-#include <linux/sched.h>
-#include <linux/errno.h>
#include <asm/asm-compat.h>
#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
+#include <asm/extable.h>
/*
* The fs value determines whether argument validity checking should be
@@ -64,31 +57,6 @@
__access_ok((__force unsigned long)(addr), (size), get_fs()))
/*
- * The exception table consists of pairs of relative addresses: the first is
- * the address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out what
- * to do.
- *
- * All the routines below use bits of fixup code that are out of line with the
- * main instruction path. This means when everything is well, we don't even
- * have to jump over them. Further, they do not intrude on our cache or tlb
- * entries.
- */
-
-#define ARCH_HAS_RELATIVE_EXTABLE
-
-struct exception_table_entry {
- int insn;
- int fixup;
-};
-
-static inline unsigned long extable_fixup(const struct exception_table_entry *x)
-{
- return (unsigned long)&x->fixup + x->fixup;
-}
-
-/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
*
@@ -301,42 +269,19 @@ extern unsigned long __copy_tofrom_user(void __user *to,
#ifndef __powerpc64__
-static inline unsigned long copy_from_user(void *to,
- const void __user *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_READ, from, n))) {
- check_object_size(to, n, false);
- return __copy_tofrom_user((__force void __user *)to, from, n);
- }
- memset(to, 0, n);
- return n;
-}
-
-static inline unsigned long copy_to_user(void __user *to,
- const void *from, unsigned long n)
-{
- if (access_ok(VERIFY_WRITE, to, n)) {
- check_object_size(from, n, true);
- return __copy_tofrom_user(to, (__force void __user *)from, n);
- }
- return n;
-}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
#else /* __powerpc64__ */
-#define __copy_in_user(to, from, size) \
- __copy_tofrom_user((to), (from), (size))
-
-extern unsigned long copy_from_user(void *to, const void __user *from,
- unsigned long n);
-extern unsigned long copy_to_user(void __user *to, const void *from,
- unsigned long n);
-extern unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n);
-
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+{
+ return __copy_tofrom_user(to, from, n);
+}
#endif /* __powerpc64__ */
-static inline unsigned long __copy_from_user_inatomic(void *to,
+static inline unsigned long raw_copy_from_user(void *to,
const void __user *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
@@ -360,12 +305,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
return 0;
}
- check_object_size(to, n, false);
-
return __copy_tofrom_user((__force void __user *)to, from, n);
}
-static inline unsigned long __copy_to_user_inatomic(void __user *to,
+static inline unsigned long raw_copy_to_user(void __user *to,
const void *from, unsigned long n)
{
if (__builtin_constant_p(n) && (n <= 8)) {
@@ -389,25 +332,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
return 0;
}
- check_object_size(from, n, true);
-
return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
-static inline unsigned long __copy_from_user(void *to,
- const void __user *from, unsigned long size)
-{
- might_fault();
- return __copy_from_user_inatomic(to, from, size);
-}
-
-static inline unsigned long __copy_to_user(void __user *to,
- const void *from, unsigned long size)
-{
- might_fault();
- return __copy_to_user_inatomic(to, from, size);
-}
-
extern unsigned long __clear_user(void __user *addr, unsigned long size);
static inline unsigned long clear_user(void __user *addr, unsigned long size)
@@ -422,7 +349,4 @@ extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _ARCH_POWERPC_UACCESS_H */
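The net effect on this header: raw_copy_{to,from,in}_user() report the number of bytes not copied and do no zeroing themselves, while callers of the public copy_from_user() keep the familiar contract. A tiny hypothetical usage sketch (fetch_user_buf is illustrative, not part of this patch):

	/* Convert the "bytes not copied" return into the usual -EFAULT. */
	static int fetch_user_buf(void *dst, const void __user *src, size_t len)
	{
		if (copy_from_user(dst, src, len))	/* nonzero = bytes missed */
			return -EFAULT;			/* tail of dst already zeroed */
		return 0;
	}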
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 2b5e09020cfe..ed7dfce331e0 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -14,7 +14,7 @@ obj-y += string.o alloc.o crtsavres.o code-patching.o \
obj-$(CONFIG_PPC32) += div64.o copy_32.o
-obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \
+obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \
copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \
memcpy_64.o memcmp_64.o
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index ff0d894d7ff9..8aedbb5f4b86 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -477,18 +477,6 @@ _GLOBAL(__copy_tofrom_user)
bdnz 130b
/* then clear out the destination: r3 bytes starting at 4(r6) */
132: mfctr r3
- srwi. r0,r3,2
- li r9,0
- mtctr r0
- beq 113f
-112: stwu r9,4(r6)
- bdnz 112b
-113: andi. r0,r3,3
- mtctr r0
- beq 120f
-114: stb r9,4(r6)
- addi r6,r6,1
- bdnz 114b
120: blr
EX_TABLE(30b,108b)
@@ -497,7 +485,5 @@ _GLOBAL(__copy_tofrom_user)
EX_TABLE(41b,111b)
EX_TABLE(130b,132b)
EX_TABLE(131b,120b)
- EX_TABLE(112b,120b)
- EX_TABLE(114b,120b)
EXPORT_SYMBOL(__copy_tofrom_user)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index aee6e24e81ab..08da06e1bd72 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -319,32 +319,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
/*
- * here we have trapped again, need to clear ctr bytes starting at r3
+ * here we have trapped again, amount remaining is in ctr.
*/
-143: mfctr r5
- li r0,0
- mr r4,r3
- mr r3,r5 /* return the number of bytes not copied */
-1: andi. r9,r4,7
- beq 3f
-90: stb r0,0(r4)
- addic. r5,r5,-1
- addi r4,r4,1
- bne 1b
- blr
-3: cmpldi cr1,r5,8
- srdi r9,r5,3
- andi. r5,r5,7
- blt cr1,93f
- mtctr r9
-91: std r0,0(r4)
- addi r4,r4,8
- bdnz 91b
-93: beqlr
- mtctr r5
-92: stb r0,0(r4)
- addi r4,r4,1
- bdnz 92b
+143: mfctr r3
blr
/*
@@ -389,10 +366,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
ld r5,-8(r1)
add r6,r6,r5
subf r3,r3,r6 /* #bytes not copied */
-190:
-191:
-192:
- blr /* #bytes not copied in r3 */
+ blr
EX_TABLE(20b,120b)
EX_TABLE(220b,320b)
@@ -451,9 +425,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
EX_TABLE(88b,188b)
EX_TABLE(43b,143b)
EX_TABLE(89b,189b)
- EX_TABLE(90b,190b)
- EX_TABLE(91b,191b)
- EX_TABLE(92b,192b)
/*
* Routine to copy a whole page of data, optimized for POWER4.
diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
deleted file mode 100644
index 9bd3a3dad78d..000000000000
--- a/arch/powerpc/lib/usercopy_64.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Functions which are too large to be inlined.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/module.h>
-#include <linux/uaccess.h>
-
-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_READ, from, n)))
- n = __copy_from_user(to, from, n);
- else
- memset(to, 0, n);
- return n;
-}
-
-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- if (likely(access_ok(VERIFY_WRITE, to, n)))
- n = __copy_to_user(to, from, n);
- return n;
-}
-
-unsigned long copy_in_user(void __user *to, const void __user *from,
- unsigned long n)
-{
- might_sleep();
- if (likely(access_ok(VERIFY_READ, from, n) &&
- access_ok(VERIFY_WRITE, to, n)))
- n =__copy_tofrom_user(to, from, n);
- return n;
-}
-
-EXPORT_SYMBOL(copy_from_user);
-EXPORT_SYMBOL(copy_to_user);
-EXPORT_SYMBOL(copy_in_user);
-
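These out-of-line wrappers can be deleted because common code now builds copy_{to,from,in}_user() on top of the inline raw primitives. A hedged sketch of the generic copy_in_user() shape that replaces the deleted one (simplified; generic_copy_in_user is an illustrative name):

	static inline unsigned long
	generic_copy_in_user(void __user *to, const void __user *from, unsigned long n)
	{
		might_fault();
		if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
			n = raw_copy_in_user(to, from, n);	/* bytes not copied */
		return n;
	}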