author     Linus Torvalds <torvalds@linux-foundation.org>   2020-10-13 01:24:13 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>   2020-10-13 01:24:13 +0200
commit     c90578360c92c71189308ebc71087197080e94c3 (patch)
tree       15cccf727f6fe35ffd81922461996c1c2ca1ebfd /arch/alpha
parent     Merge tag 'docs-5.10' of git://git.lwn.net/linux (diff)
parent     ppc: propagate the calling conventions change down to csum_partial_copy_gener... (diff)
Merge branch 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull copy_and_csum cleanups from Al Viro:
"Saner calling conventions for csum_and_copy_..._user() and friends"
[ Removing 800+ lines of code and cleaning stuff up is good - Linus ]
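
As a rough illustration of what the saner convention means for callers: the old helpers took an initial sum plus an int *errp out-parameter, while the new ones take only (src, dst, len) and return 0 on a fault. The sketch below models that in ordinary userspace C; it is not kernel code, and demo_csum_and_copy(), demo_fault and the toy 16-bit sum are invented stand-ins for illustration only.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Old shape (see the arch/alpha diff below):
     *   __wsum csum_and_copy_from_user(const void __user *src, void *dst,
     *                                  int len, __wsum sum, int *errp);
     * New shape:
     *   __wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
     * with a return value of 0 meaning "fault". */

    static int demo_fault;              /* pretend result of access_ok()/fixup */

    static uint32_t demo_csum_and_copy(const void *src, void *dst, int len)
    {
            uint64_t sum = 0xffffffff;  /* seeded with ~0U, so it never folds to 0 */
            const uint8_t *p = src;
            int i;

            if (demo_fault)
                    return 0;           /* was: *errp = -EFAULT; memset(dst, 0, len); */

            for (i = 0; i < len; i++) {
                    ((uint8_t *)dst)[i] = p[i];
                    /* toy 16-bit word accumulation, big-endian-ish */
                    sum += (i & 1) ? p[i] : (uint32_t)p[i] << 8;
            }
            while (sum >> 16)           /* fold to 16 bits */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint32_t)sum;
    }

    int main(void)
    {
            char src[4] = { 1, 2, 3, 4 };
            char dst[4];
            uint32_t csum = demo_csum_and_copy(src, dst, sizeof(src));

            if (!csum)                  /* new convention: 0 <=> fault */
                    puts("fault");
            else
                    printf("csum=%#x copied ok=%d\n", (unsigned)csum,
                           !memcmp(src, dst, sizeof(dst)));

            demo_fault = 1;             /* simulate access_ok() failing */
            if (!demo_csum_and_copy(src, dst, sizeof(src)))
                    puts("fault reported as a 0 return");
            return 0;
    }

The only failure signal left is the zero return, which is what lets the arch helpers below drop all of the *errp plumbing.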
* 'work.csum_and_copy' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
ppc: propagate the calling conventions change down to csum_partial_copy_generic()
amd64: switch csum_partial_copy_generic() to new calling conventions
sparc64: propagate the calling convention changes down to __csum_partial_copy_...()
xtensa: propagate the calling conventions change down into csum_partial_copy_generic()
mips: propagate the calling convention change down into __csum_partial_copy_..._user()
mips: __csum_partial_copy_kernel() has no users left
mips: csum_and_copy_{to,from}_user() are never called under KERNEL_DS
sparc32: propagate the calling conventions change down to __csum_partial_copy_sparc_generic()
i386: propagate the calling conventions change down to csum_partial_copy_generic()
sh: propagate the calling conventions change down to csum_partial_copy_generic()
m68k: get rid of zeroing destination on error in csum_and_copy_from_user()
arm: propagate the calling convention changes down to csum_partial_copy_from_user()
alpha: propagate the calling convention changes down to csum_partial_copy.c helpers
saner calling conventions for csum_and_copy_..._user()
csum_and_copy_..._user(): pass 0xffffffff instead of 0 as initial sum
csum_partial_copy_nocheck(): drop the last argument
unify generic instances of csum_partial_copy_nocheck()
icmp_push_reply(): reorder adding the checksum up
skb_copy_and_csum_bits(): don't bother with the last argument
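
Why a return value of 0 can safely mean "fault" comes down to the 0xffffffff initial sum mentioned in the list above: end-around-carry (ones' complement) addition that starts from a nonzero seed never folds down to zero, so a legitimate checksum cannot collide with the error value. A small standalone demonstration in plain C (assumption: a toy 16-bit fold, not the kernel's csum_partial):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Fold a running sum to 16 bits with end-around carry. */
    static uint32_t fold(uint64_t sum)
    {
            while (sum >> 16)
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint32_t)sum;
    }

    /* 16-bit ones'-complement sum of a buffer, starting from "seed". */
    static uint32_t partial(const uint8_t *p, int len, uint64_t seed)
    {
            uint64_t sum = seed;
            int i;

            for (i = 0; i + 1 < len; i += 2)
                    sum += (uint32_t)p[i] << 8 | p[i + 1];
            if (i < len)
                    sum += (uint32_t)p[i] << 8;
            return fold(sum);
    }

    int main(void)
    {
            uint8_t zeros[32];

            memset(zeros, 0, sizeof(zeros));
            /* seed 0: an all-zero buffer sums to 0, indistinguishable from "fault" */
            printf("seed 0x00000000 -> %#x\n", (unsigned)partial(zeros, sizeof(zeros), 0));
            /* seed ~0U: the same buffer sums to 0xffff, leaving 0 free to mean EFAULT */
            printf("seed 0xffffffff -> %#x\n", (unsigned)partial(zeros, sizeof(zeros), 0xffffffffULL));
            return 0;
    }

Running it prints 0 for the zero seed and 0xffff for the 0xffffffff seed over the same all-zero buffer.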
Diffstat (limited to 'arch/alpha')
-rw-r--r--   arch/alpha/include/asm/checksum.h  |   5
-rw-r--r--   arch/alpha/lib/csum_partial_copy.c | 164
2 files changed, 74 insertions, 95 deletions
diff --git a/arch/alpha/include/asm/checksum.h b/arch/alpha/include/asm/checksum.h
index 0eac81624d01..99d631e146b2 100644
--- a/arch/alpha/include/asm/checksum.h
+++ b/arch/alpha/include/asm/checksum.h
@@ -42,9 +42,10 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
 #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER
-__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *errp);
+#define _HAVE_ARCH_CSUM_AND_COPY
+__wsum csum_and_copy_from_user(const void __user *src, void *dst, int len);
 
-__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst, int len);
 
 
 /*
diff --git a/arch/alpha/lib/csum_partial_copy.c b/arch/alpha/lib/csum_partial_copy.c
index af1dad74e933..dc68efbe9367 100644
--- a/arch/alpha/lib/csum_partial_copy.c
+++ b/arch/alpha/lib/csum_partial_copy.c
@@ -39,12 +39,11 @@ __asm__ __volatile__("insql %1,%2,%0":"=r" (z):"r" (x),"r" (y))
 #define insqh(x,y,z) \
 __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
 
-
-#define __get_user_u(x,ptr)                             \
+#define __get_word(insn,x,ptr)                          \
 ({                                                      \
         long __guu_err;                                 \
         __asm__ __volatile__(                           \
-        "1:     ldq_u %0,%2\n"                          \
+        "1:     "#insn" %0,%2\n"                        \
         "2:\n"                                          \
         EXC(1b,2b,%0,%1)                                \
                 : "=r"(x), "=r"(__guu_err)              \
@@ -52,19 +51,6 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y))
         __guu_err;                                      \
 })
 
-#define __put_user_u(x,ptr)                             \
-({                                                      \
-        long __puu_err;                                 \
-        __asm__ __volatile__(                           \
-        "1:     stq_u %2,%1\n"                          \
-        "2:\n"                                          \
-        EXC(1b,2b,$31,%0)                               \
-                : "=r"(__puu_err)                       \
-                : "m"(__m(addr)), "rJ"(x), "0"(0));     \
-        __puu_err;                                      \
-})
-
-
 static inline unsigned short from64to16(unsigned long x)
 {
         /* Using extract instructions is a bit more efficient
@@ -95,15 +81,15 @@ static inline unsigned short from64to16(unsigned long x)
  */
 static inline unsigned long
 csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
-                         long len, unsigned long checksum,
-                         int *errp)
+                         long len)
 {
+        unsigned long checksum = ~0U;
         unsigned long carry = 0;
-        int err = 0;
 
         while (len >= 0) {
                 unsigned long word;
-                err |= __get_user(word, src);
+                if (__get_word(ldq, word, src))
+                        return 0;
                 checksum += carry;
                 src++;
                 checksum += word;
@@ -116,7 +102,8 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
         checksum += carry;
         if (len) {
                 unsigned long word, tmp;
-                err |= __get_user(word, src);
+                if (__get_word(ldq, word, src))
+                        return 0;
                 tmp = *dst;
                 mskql(word, len, word);
                 checksum += word;
@@ -125,7 +112,6 @@ csum_partial_cfu_aligned(const unsigned long __user *src, unsigned long *dst,
                 *dst = word | tmp;
                 checksum += carry;
         }
-        if (err && errp) *errp = err;
         return checksum;
 }
 
@@ -137,20 +123,21 @@ static inline unsigned long
 csum_partial_cfu_dest_aligned(const unsigned long __user *src,
                               unsigned long *dst,
                               unsigned long soff,
-                              long len, unsigned long checksum,
-                              int *errp)
+                              long len)
 {
         unsigned long first;
         unsigned long word, carry;
         unsigned long lastsrc = 7+len+(unsigned long)src;
-        int err = 0;
+        unsigned long checksum = ~0U;
 
-        err |= __get_user_u(first,src);
+        if (__get_word(ldq_u, first,src))
+                return 0;
         carry = 0;
         while (len >= 0) {
                 unsigned long second;
 
-                err |= __get_user_u(second, src+1);
+                if (__get_word(ldq_u, second, src+1))
+                        return 0;
                 extql(first, soff, word);
                 len -= 8;
                 src++;
@@ -168,7 +155,8 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
         if (len) {
                 unsigned long tmp;
                 unsigned long second;
-                err |= __get_user_u(second, lastsrc);
+                if (__get_word(ldq_u, second, lastsrc))
+                        return 0;
                 tmp = *dst;
                 extql(first, soff, word);
                 extqh(second, soff, first);
@@ -180,7 +168,6 @@ csum_partial_cfu_dest_aligned(const unsigned long __user *src,
                 *dst = word | tmp;
                 checksum += carry;
         }
-        if (err && errp) *errp = err;
         return checksum;
 }
 
@@ -191,18 +178,18 @@ static inline unsigned long
 csum_partial_cfu_src_aligned(const unsigned long __user *src,
                              unsigned long *dst,
                              unsigned long doff,
-                             long len, unsigned long checksum,
-                             unsigned long partial_dest,
-                             int *errp)
+                             long len,
+                             unsigned long partial_dest)
 {
         unsigned long carry = 0;
         unsigned long word;
         unsigned long second_dest;
-        int err = 0;
+        unsigned long checksum = ~0U;
 
         mskql(partial_dest, doff, partial_dest);
         while (len >= 0) {
-                err |= __get_user(word, src);
+                if (__get_word(ldq, word, src))
+                        return 0;
                 len -= 8;
                 insql(word, doff, second_dest);
                 checksum += carry;
@@ -216,7 +203,8 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
         len += 8;
         if (len) {
                 checksum += carry;
-                err |= __get_user(word, src);
+                if (__get_word(ldq, word, src))
+                        return 0;
                 mskql(word, len, word);
                 len -= 8;
                 checksum += word;
@@ -237,7 +225,6 @@ csum_partial_cfu_src_aligned(const unsigned long __user *src,
                 stq_u(partial_dest | second_dest, dst);
 out:
         checksum += carry;
-        if (err && errp) *errp = err;
         return checksum;
 }
 
@@ -249,23 +236,23 @@ static inline unsigned long
 csum_partial_cfu_unaligned(const unsigned long __user * src,
                            unsigned long * dst,
                            unsigned long soff, unsigned long doff,
-                           long len, unsigned long checksum,
-                           unsigned long partial_dest,
-                           int *errp)
+                           long len, unsigned long partial_dest)
 {
         unsigned long carry = 0;
         unsigned long first;
         unsigned long lastsrc;
-        int err = 0;
+        unsigned long checksum = ~0U;
 
-        err |= __get_user_u(first, src);
+        if (__get_word(ldq_u, first, src))
+                return 0;
         lastsrc = 7+len+(unsigned long)src;
         mskql(partial_dest, doff, partial_dest);
         while (len >= 0) {
                 unsigned long second, word;
                 unsigned long second_dest;
 
-                err |= __get_user_u(second, src+1);
+                if (__get_word(ldq_u, second, src+1))
+                        return 0;
                 extql(first, soff, word);
                 checksum += carry;
                 len -= 8;
@@ -286,7 +273,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
                 unsigned long second, word;
                 unsigned long second_dest;
 
-                err |= __get_user_u(second, lastsrc);
+                if (__get_word(ldq_u, second, lastsrc))
+                        return 0;
                 extql(first, soff, word);
                 extqh(second, soff, first);
                 word |= first;
@@ -307,7 +295,8 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
                 unsigned long second, word;
                 unsigned long second_dest;
 
-                err |= __get_user_u(second, lastsrc);
+                if (__get_word(ldq_u, second, lastsrc))
+                        return 0;
                 extql(first, soff, word);
                 extqh(second, soff, first);
                 word |= first;
@@ -320,66 +309,55 @@ csum_partial_cfu_unaligned(const unsigned long __user * src,
                 stq_u(partial_dest | word | second_dest, dst);
                 checksum += carry;
         }
-        if (err && errp) *errp = err;
         return checksum;
 }
 
-__wsum
-csum_and_copy_from_user(const void __user *src, void *dst, int len,
-                        __wsum sum, int *errp)
+static __wsum __csum_and_copy(const void __user *src, void *dst, int len)
 {
-        unsigned long checksum = (__force u32) sum;
         unsigned long soff = 7 & (unsigned long) src;
         unsigned long doff = 7 & (unsigned long) dst;
-
-        if (len) {
-                if (!access_ok(src, len)) {
-                        if (errp) *errp = -EFAULT;
-                        memset(dst, 0, len);
-                        return sum;
-                }
-                if (!doff) {
-                        if (!soff)
-                                checksum = csum_partial_cfu_aligned(
-                                        (const unsigned long __user *) src,
-                                        (unsigned long *) dst,
-                                        len-8, checksum, errp);
-                        else
-                                checksum = csum_partial_cfu_dest_aligned(
-                                        (const unsigned long __user *) src,
-                                        (unsigned long *) dst,
-                                        soff, len-8, checksum, errp);
-                } else {
-                        unsigned long partial_dest;
-                        ldq_u(partial_dest, dst);
-                        if (!soff)
-                                checksum = csum_partial_cfu_src_aligned(
-                                        (const unsigned long __user *) src,
-                                        (unsigned long *) dst,
-                                        doff, len-8, checksum,
-                                        partial_dest, errp);
-                        else
-                                checksum = csum_partial_cfu_unaligned(
-                                        (const unsigned long __user *) src,
-                                        (unsigned long *) dst,
-                                        soff, doff, len-8, checksum,
-                                        partial_dest, errp);
-                }
-                checksum = from64to16 (checksum);
+        unsigned long checksum;
+
+        if (!doff) {
+                if (!soff)
+                        checksum = csum_partial_cfu_aligned(
+                                (const unsigned long __user *) src,
+                                (unsigned long *) dst, len-8);
+                else
+                        checksum = csum_partial_cfu_dest_aligned(
+                                (const unsigned long __user *) src,
+                                (unsigned long *) dst,
+                                soff, len-8);
+        } else {
+                unsigned long partial_dest;
+                ldq_u(partial_dest, dst);
+                if (!soff)
+                        checksum = csum_partial_cfu_src_aligned(
+                                (const unsigned long __user *) src,
+                                (unsigned long *) dst,
+                                doff, len-8, partial_dest);
+                else
+                        checksum = csum_partial_cfu_unaligned(
+                                (const unsigned long __user *) src,
+                                (unsigned long *) dst,
+                                soff, doff, len-8, partial_dest);
         }
-        return (__force __wsum)checksum;
+        return (__force __wsum)from64to16 (checksum);
+}
+
+__wsum
+csum_and_copy_from_user(const void __user *src, void *dst, int len)
+{
+        if (!access_ok(src, len))
+                return 0;
+        return __csum_and_copy(src, dst, len);
 }
 EXPORT_SYMBOL(csum_and_copy_from_user);
 
 __wsum
-csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
+csum_partial_copy_nocheck(const void *src, void *dst, int len)
 {
-        __wsum checksum;
-        mm_segment_t oldfs = get_fs();
-        set_fs(KERNEL_DS);
-        checksum = csum_and_copy_from_user((__force const void __user *)src,
-                        dst, len, sum, NULL);
-        set_fs(oldfs);
-        return checksum;
+        return __csum_and_copy((__force const void __user *)src,
+                        dst, len);
 }
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
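
For the alpha helpers in the diff above, the visible pattern is that every exception-table-protected load now bails out with return 0 instead of OR-ing a flag into err and reporting it through *errp at the end. A loose userspace model of that shape follows; demo_get_word() and demo_cfu_aligned() are invented stand-ins, not the real __get_word()/csum_partial_cfu_aligned(), which work on byte lengths and handle trailing partial words.

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for __get_word(ldq, word, src): nonzero means the load
     * "trapped" and was fixed up, 0 means *out holds the loaded value. */
    static int demo_get_word(uint64_t *out, const uint64_t *src, int fault_at, int idx)
    {
            if (idx == fault_at)
                    return 1;
            *out = src[idx];
            return 0;
    }

    /* Loosely mirrors the post-patch csum_partial_cfu_aligned(): the checksum
     * is seeded with ~0U locally, and a faulting load returns 0 immediately. */
    static uint64_t demo_cfu_aligned(const uint64_t *src, uint64_t *dst,
                                     int nwords, int fault_at)
    {
            uint64_t checksum = ~0U;
            uint64_t carry = 0;
            int i;

            for (i = 0; i < nwords; i++) {
                    uint64_t word;

                    if (demo_get_word(&word, src, fault_at, i))
                            return 0;       /* was: err |= ...; *errp = err at the end */
                    checksum += carry;
                    checksum += word;
                    carry = checksum < word;  /* toy 64-bit wraparound carry */
                    dst[i] = word;
            }
            return checksum + carry;
    }

    int main(void)
    {
            uint64_t src[4] = { 1, 2, 3, 4 };
            uint64_t dst[4];

            printf("ok:    %#llx\n", (unsigned long long)demo_cfu_aligned(src, dst, 4, -1));
            printf("fault: %#llx\n", (unsigned long long)demo_cfu_aligned(src, dst, 4, 2));
            return 0;
    }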