author    Ingo Molnar <mingo@elte.hu>  2009-02-25 08:22:20 +0100
committer Ingo Molnar <mingo@elte.hu>  2009-02-25 10:20:05 +0100
commit    95108fa34a83ffd97e0af959e4b28d7c62008781 (patch)
tree      06577270f81166d67b9058be1c04812b1ccf1058 /arch/x86/include/asm/uaccess_64.h
parent    x86, mm: pass in 'total' to __copy_from_user_*nocache() (diff)
x86: usercopy: check for total size when deciding non-temporal cutoff
Impact: make more types of copies non-temporal

This change makes the simple fix from commit 30d697f ("x86: fix
performance regression in write() syscall") a bit more sophisticated:
we now check the 'total' number of bytes written to decide whether to
copy in a cached or a non-temporal way.

This will, for example, cause the tail (modulo 4096 bytes) chunk of a
large write() to be non-temporal too - not just the page-sized chunks.

Cc: Salman Qazi <sqazi@google.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
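To make the effect concrete, here is a minimal userspace sketch of a
chunked copy loop, assuming a 4096-byte PAGE_SIZE. The stub copy
routines and copy_chunk() are hypothetical stand-ins for
__copy_user_nocache()/__copy_from_user(), not the kernel's actual
implementation; the real primitives use non-temporal (movnti) stores
and return a count of uncopied bytes.

/*
 * Illustrative sketch (not from the patch): why basing the cutoff
 * on 'total' rather than 'size' matters for a chunked write().
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096u

/* Stand-in for __copy_user_nocache(): a cache-bypassing copy. */
static unsigned copy_nocache(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;			/* 0 bytes left uncopied */
}

/* Stand-in for __copy_from_user(): an ordinary cached copy. */
static unsigned copy_cached(void *dst, const void *src, unsigned size)
{
	memcpy(dst, src, size);
	return 0;
}

/*
 * Mirrors the post-patch logic: the cached/non-temporal decision
 * looks at 'total' (the whole write), not 'size' (this chunk).
 */
static unsigned copy_chunk(void *dst, const void *src,
			   unsigned size, unsigned total)
{
	if (total >= PAGE_SIZE)
		return copy_nocache(dst, src, size);
	return copy_cached(dst, src, size);
}

int main(void)
{
	static char src[10000], dst[10000];
	unsigned total = sizeof(src), done = 0;

	while (done < total) {
		unsigned chunk = total - done;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;
		/*
		 * With the old 'size >= PAGE_SIZE' test, the final
		 * 1808-byte tail would fall back to the cached path;
		 * with 'total >= PAGE_SIZE' it stays non-temporal.
		 */
		copy_chunk(dst + done, src + done, chunk, total);
		done += chunk;
	}
	printf("copied %u bytes of %u\n", done, total);
	return 0;
}

The key point is that 'size' shrinks to the sub-page tail on the last
iteration while 'total' stays constant, so every chunk of a large
write takes the same (non-temporal) path.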
Diffstat (limited to 'arch/x86/include/asm/uaccess_64.h')
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index a748253db0c9..dcaa0404cf7b 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -198,7 +198,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
* non-temporal stores here. Smaller writes get handled
* via regular __copy_from_user():
*/
- if (likely(size >= PAGE_SIZE))
+ if (likely(total >= PAGE_SIZE))
return __copy_user_nocache(dst, src, size, 1);
else
return __copy_from_user(dst, src, size);
@@ -207,7 +207,7 @@ static inline int __copy_from_user_nocache(void *dst, const void __user *src,
static inline int __copy_from_user_inatomic_nocache(void *dst,
const void __user *src, unsigned size, unsigned total)
{
- if (likely(size >= PAGE_SIZE))
+ if (likely(total >= PAGE_SIZE))
return __copy_user_nocache(dst, src, size, 0);
else
return __copy_from_user_inatomic(dst, src, size);