author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-01 23:41:04 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-01 23:41:04 +0200
commit     5db6db0d400edd8bec274e34960cfa22838e1df5
tree       3d7934f2eb27a2b72b87eae3c2918cf2e635d814  /arch/s390/lib/uaccess.c
parent     Merge tag 'devprop-4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git...
parent     HAVE_ARCH_HARDENED_USERCOPY is unconditional now
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
"This is the uaccess unification pile. It's _not_ the end of uaccess
work, but the next batch of that will go into the next cycle. This one
mostly takes copy_from_user() and friends out of arch/* and gets the
zero-padding behaviour in sync for all architectures.
Dealing with the nocache/writethrough mess is for the next cycle;
fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
sold on access_ok() in there, BTW; just not in this pile), same for
reducing __copy_... callsites, strn*... stuff, etc. - there will be a
pile about as large as this one in the next merge window.
This one sat in -next for weeks. -3KLoC"
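
For context, the contract this series standardizes: each architecture now supplies raw_copy_from_user()/raw_copy_to_user(), which copy what they can, return the number of bytes left uncopied (0 on success), and do no zeroing of the destination. The tail-zeroing that used to be duplicated per architecture (often in assembly, as in the s390 diff below) now happens exactly once in generic code. A simplified sketch of that generic caller-side pattern, close to what include/linux/uaccess.h gained in this cycle (might_fault() and the hardened-usercopy hooks omitted):

	static inline unsigned long
	_copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		/* arch hook: returns the number of bytes it could NOT copy */
		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		/* zero-pad the uncopied tail once, for every architecture */
		if (unlikely(res))
			memset(to + (n - res), 0, res);
		return res;
	}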
* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
HAVE_ARCH_HARDENED_USERCOPY is unconditional now
CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
m32r: switch to RAW_COPY_USER
hexagon: switch to RAW_COPY_USER
microblaze: switch to RAW_COPY_USER
get rid of padding, switch to RAW_COPY_USER
ia64: get rid of copy_in_user()
ia64: sanitize __access_ok()
ia64: get rid of 'segment' argument of __do_{get,put}_user()
ia64: get rid of 'segment' argument of __{get,put}_user_check()
ia64: add extable.h
powerpc: get rid of zeroing, switch to RAW_COPY_USER
esas2r: don't open-code memdup_user()
alpha: fix stack smashing in old_adjtimex(2)
don't open-code kernel_setsockopt()
mips: switch to RAW_COPY_USER
mips: get rid of tail-zeroing in primitives
mips: make copy_from_user() zero tail explicitly
mips: clean and reorder the forest of macros...
mips: consolidate __invoke_... wrappers
...
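
The various "switch to RAW_COPY_USER" commits above all converge on the same per-architecture surface, visible in the s390 diff below: three functions whose return value is the number of bytes not copied, with zero-filling and object-size checking left to generic code. The interface, as it appears in this diff:

	/* provided by each arch; return value = bytes NOT copied, 0 on success */
	unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n);
	unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n);
	unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n);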
Diffstat (limited to 'arch/s390/lib/uaccess.c')
-rw-r--r--  arch/s390/lib/uaccess.c  |  68
1 file changed, 23 insertions(+), 45 deletions(-)
diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c
index f481fcde067b..1e5bb2b86c42 100644
--- a/arch/s390/lib/uaccess.c
+++ b/arch/s390/lib/uaccess.c
@@ -26,7 +26,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
 	tmp1 = -4096UL;
 	asm volatile(
 		"0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
-		"9: jz 7f\n"
+		"6: jz 4f\n"
 		"1: algr %0,%3\n"
 		"   slgr %1,%3\n"
 		"   slgr %2,%3\n"
@@ -35,23 +35,13 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr
 		"   nr %4,%3\n"		/* %4 = (ptr + 4095) & -4096 */
 		"   slgr %4,%1\n"
 		"   clgr %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh 4f\n"
+		"   jnh 5f\n"
 		"3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
-		"10:slgr %0,%4\n"
-		"   algr %2,%4\n"
-		"4: lghi %4,-1\n"
-		"   algr %4,%0\n"	/* copy remaining size, subtract 1 */
-		"   bras %3,6f\n"	/* memset loop */
-		"   xc 0(1,%2),0(%2)\n"
-		"5: xc 0(256,%2),0(%2)\n"
-		"   la %2,256(%2)\n"
-		"6: aghi %4,-256\n"
-		"   jnm 5b\n"
-		"   ex %4,0(%3)\n"
-		"   j 8f\n"
-		"7: slgr %0,%0\n"
-		"8:\n"
-		EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b)
+		"7: slgr %0,%4\n"
+		"   j 5f\n"
+		"4: slgr %0,%0\n"
+		"5:\n"
+		EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b)
 		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
 		: "d" (reg0) : "cc", "memory");
 	return size;
@@ -67,49 +57,38 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr,
 	asm volatile(
 		"   sacf 0\n"
 		"0: mvcp 0(%0,%2),0(%1),%3\n"
-		"10:jz 8f\n"
+		"7: jz 5f\n"
 		"1: algr %0,%3\n"
 		"   la %1,256(%1)\n"
 		"   la %2,256(%2)\n"
 		"2: mvcp 0(%0,%2),0(%1),%3\n"
-		"11:jnz 1b\n"
-		"   j 8f\n"
+		"8: jnz 1b\n"
+		"   j 5f\n"
 		"3: la %4,255(%1)\n"	/* %4 = ptr + 255 */
 		"   lghi %3,-4096\n"
 		"   nr %4,%3\n"		/* %4 = (ptr + 255) & -4096 */
 		"   slgr %4,%1\n"
 		"   clgr %0,%4\n"	/* copy crosses next page boundary? */
-		"   jnh 5f\n"
+		"   jnh 6f\n"
 		"4: mvcp 0(%4,%2),0(%1),%3\n"
-		"12:slgr %0,%4\n"
-		"   algr %2,%4\n"
-		"5: lghi %4,-1\n"
-		"   algr %4,%0\n"	/* copy remaining size, subtract 1 */
-		"   bras %3,7f\n"	/* memset loop */
-		"   xc 0(1,%2),0(%2)\n"
-		"6: xc 0(256,%2),0(%2)\n"
-		"   la %2,256(%2)\n"
-		"7: aghi %4,-256\n"
-		"   jnm 6b\n"
-		"   ex %4,0(%3)\n"
-		"   j 9f\n"
-		"8: slgr %0,%0\n"
-		"9: sacf 768\n"
-		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b)
-		EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b)
+		"9: slgr %0,%4\n"
+		"   j 6f\n"
+		"5: slgr %0,%0\n"
+		"6: sacf 768\n"
+		EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+		EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b)
 		: "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
 		: : "cc", "memory");
 	return size;
 }
 
-unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
+unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	check_object_size(to, n, false);
 	if (static_branch_likely(&have_mvcos))
 		return copy_from_user_mvcos(to, from, n);
 	return copy_from_user_mvcp(to, from, n);
 }
-EXPORT_SYMBOL(__copy_from_user);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x,
 					       unsigned long size)
@@ -176,14 +155,13 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x,
 	return size;
 }
 
-unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
+unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	check_object_size(from, n, true);
 	if (static_branch_likely(&have_mvcos))
 		return copy_to_user_mvcos(to, from, n);
 	return copy_to_user_mvcs(to, from, n);
 }
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(raw_copy_to_user);
 
 static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from,
 					       unsigned long size)
@@ -240,13 +218,13 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user
 	return size;
 }
 
-unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n)
+unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
 {
 	if (static_branch_likely(&have_mvcos))
 		return copy_in_user_mvcos(to, from, n);
 	return copy_in_user_mvc(to, from, n);
 }
-EXPORT_SYMBOL(__copy_in_user);
+EXPORT_SYMBOL(raw_copy_in_user);
 
 static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size)
 {