author    Mikulas Patocka <mpatocka@redhat.com>  2020-09-26 06:19:24 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-09-26 19:33:57 +0200
commit    a1cd6c2ae47ee10ff21e62475685d5b399e2ed4a (patch)
tree      2d625ba56c1f32f28d421f308088d82fcec9feef /arch/x86/lib/usercopy_64.c
parent    lib/memregion.c: include memregion.h (diff)
arch/x86/lib/usercopy_64.c: fix __copy_user_flushcache() cache writeback
If we copy less than 8 bytes and if the destination crosses a cache line,
__copy_user_flushcache would invalidate only the first cache line. This
patch makes it invalidate the second cache line as well.

Fixes: 0aed55af88345b ("x86, uaccess: introduce copy_from_iter_flushcache for pmem / cache-bypass operations")
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Dan Williams <dan.j.wiilliams@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Matthew Wilcox <mawilcox@microsoft.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: <stable@vger.kernel.org>
Link: https://lkml.kernel.org/r/alpine.LRH.2.02.2009161451140.21915@file01.intranet.prod.int.rdu2.redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
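To see why flushing length 1 misses the second cache line, the following
user-space sketch models the usual round-down-to-line-start / walk-to-end
arithmetic that a clean_cache_range()-style helper performs. It is not the
kernel code: the CACHE_LINE_SIZE value and the lines_covered() helper are
illustrative assumptions, chosen only to show which lines a given flush
length reaches.

/*
 * Sketch (user space, not kernel code) of cache-line coverage for a flush
 * of [addr, addr + len).  CACHE_LINE_SIZE and lines_covered() are assumed
 * names, not taken from usercopy_64.c.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define CACHE_LINE_SIZE 64UL   /* assumed flush granularity */

/* Count how many cache lines the range [addr, addr + len) touches after
 * rounding the start address down to a line boundary. */
static unsigned long lines_covered(uintptr_t addr, size_t len)
{
	uintptr_t start = addr & ~(CACHE_LINE_SIZE - 1);  /* round down */
	uintptr_t end = addr + len;                        /* exclusive */
	unsigned long n = 0;

	for (uintptr_t p = start; p < end; p += CACHE_LINE_SIZE)
		n++;
	return n;
}

int main(void)
{
	/* A 4-byte copy whose destination starts 2 bytes before a line
	 * boundary, so the written data spans two cache lines. */
	uintptr_t dst = 0x1000 + 62;
	size_t size = 4;

	printf("flush len 1    covers %lu line(s)\n", lines_covered(dst, 1));
	printf("flush len size covers %lu line(s)\n", lines_covered(dst, size));
	/* Prints 1 and 2: flushing only 1 byte writes back just the first
	 * line, while flushing the full copy size also reaches the second,
	 * which is what clean_cache_range(dst, size) achieves. */
	return 0;
}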
Diffstat (limited to 'arch/x86/lib/usercopy_64.c')
-rw-r--r--  arch/x86/lib/usercopy_64.c  |  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index b0dfac3d3df7..1847e993ac63 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
 	 */
 	if (size < 8) {
 		if (!IS_ALIGNED(dest, 4) || size != 4)
-			clean_cache_range(dst, 1);
+			clean_cache_range(dst, size);
 	} else {
 		if (!IS_ALIGNED(dest, 8)) {
 			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);