path: root/arch/x86/lib
author Linus Torvalds <torvalds@linux-foundation.org>  2008-06-18 02:47:50 +0200
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-06-18 02:47:50 +0200
commit 42a886af728c089df8da1b0017b0e7e6c81b5335 (patch)
tree 3c896bd6f64b0107e4085073c30862b215fbc957 /arch/x86/lib
parent Merge branch 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/gi... (diff)
x86-64: Fix "bytes left to copy" return value for copy_from_user()
Most users by far do not care about the exact return value (they only really care about whether the copy succeeded in its entirety or not), but a few special core routines actually care deeply about exactly how many bytes were copied from user space.

And the unrolled versions of the x86-64 user copy routines would sometimes report that it had copied more bytes than it actually had.

Very few uses actually have partial copies to begin with, but to make this bug even harder to trigger, most x86 CPU's use the "rep string" instructions for normal user copies, and that version didn't have this issue.

To make it even harder to hit, the one user of this that really cared about the return value (and used the uncached version of the copy that doesn't use the "rep string" instructions) was the generic write routine, which pre-populated its source, once more hiding the problem by avoiding the exception case that triggers the bug.

In other words, very special thanks to Bron Gondwana who not only triggered this, but created a test-program to show it, and bisected the behavior down to commit 08291429cfa6258c4cd95d8833beb40f828b194e ("mm: fix pagecache write deadlocks") which changed the access pattern just enough that you can now trigger it with 'writev()' with multiple iovec's.

That commit itself was not the cause of the bug, it just allowed all the stars to align just right that you could trigger the problem.

[ Side note: this is just the minimal fix to make the copy routines (with __copy_from_user_inatomic_nocache as the particular version that was involved in showing this) have the right return values. We really should improve on the exceptional case further - to make the copy do a byte-accurate copy up to the exact page limit that causes it to fail. As it is, the callers have to do extra work to handle the limit case gracefully. ]

Reported-by: Bron Gondwana <brong@fastmail.fm>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
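To illustrate the return-value contract the fix restores, here is a minimal user-space sketch (the helper fake_copy_from_user and the fault offset are hypothetical, not kernel code): copy_from_user() must return the number of bytes it could not copy, and a caller that supports partial writes trusts that count to know how much of its buffer holds valid data. If the routine under-reports the leftover count, the caller ends up consuming bytes that were never copied.

    #include <stddef.h>
    #include <stdio.h>
    #include <string.h>

    /*
     * Hypothetical stand-in for copy_from_user(): copy up to 'len' bytes and
     * return the number of bytes that could NOT be copied (0 on full success).
     * 'fault_offset' simulates the point at which the user page would fault.
     */
    static size_t fake_copy_from_user(void *dst, const void *src,
                                      size_t len, size_t fault_offset)
    {
            size_t copyable = len < fault_offset ? len : fault_offset;

            memcpy(dst, src, copyable);
            return len - copyable;          /* bytes left uncopied */
    }

    int main(void)
    {
            char user_buf[64] = "data that lives in user space";
            char kernel_buf[64];

            /* Simulate a fault 24 bytes into a 64-byte copy. */
            size_t left = fake_copy_from_user(kernel_buf, user_buf, 64, 24);
            size_t copied = 64 - left;

            /*
             * Only 'copied' bytes of kernel_buf are valid.  If the copy routine
             * under-reports 'left' (i.e. claims it copied more than it did, as
             * the unrolled x86-64 version could before this fix), the caller
             * treats uninitialized bytes as user data.
             */
            printf("copied %zu bytes, %zu left\n", copied, left);
            return 0;
    }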
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--  arch/x86/lib/copy_user_64.S          25
-rw-r--r--  arch/x86/lib/copy_user_nocache_64.S  25
2 files changed, 22 insertions(+), 28 deletions(-)
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 70bebd310408..ee1c3f635157 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -217,19 +217,19 @@ ENTRY(copy_user_generic_unrolled)
/* table sorted by exception address */
.section __ex_table,"a"
.align 8
- .quad .Ls1,.Ls1e
- .quad .Ls2,.Ls2e
- .quad .Ls3,.Ls3e
- .quad .Ls4,.Ls4e
- .quad .Ld1,.Ls1e
+ .quad .Ls1,.Ls1e /* Ls1-Ls4 have copied zero bytes */
+ .quad .Ls2,.Ls1e
+ .quad .Ls3,.Ls1e
+ .quad .Ls4,.Ls1e
+ .quad .Ld1,.Ls1e /* Ld1-Ld4 have copied 0-24 bytes */
.quad .Ld2,.Ls2e
.quad .Ld3,.Ls3e
.quad .Ld4,.Ls4e
- .quad .Ls5,.Ls5e
- .quad .Ls6,.Ls6e
- .quad .Ls7,.Ls7e
- .quad .Ls8,.Ls8e
- .quad .Ld5,.Ls5e
+ .quad .Ls5,.Ls5e /* Ls5-Ls8 have copied 32 bytes */
+ .quad .Ls6,.Ls5e
+ .quad .Ls7,.Ls5e
+ .quad .Ls8,.Ls5e
+ .quad .Ld5,.Ls5e /* Ld5-Ld8 have copied 32-56 bytes */
.quad .Ld6,.Ls6e
.quad .Ld7,.Ls7e
.quad .Ld8,.Ls8e
@@ -244,11 +244,8 @@ ENTRY(copy_user_generic_unrolled)
.quad .Le5,.Le_zero
.previous
- /* compute 64-offset for main loop. 8 bytes accuracy with error on the
- pessimistic side. this is gross. it would be better to fix the
- interface. */
/* eax: zero, ebx: 64 */
-.Ls1e: addl $8,%eax
+.Ls1e: addl $8,%eax /* eax is bytes left uncopied within the loop (Ls1e: 64 .. Ls8e: 8) */
.Ls2e: addl $8,%eax
.Ls3e: addl $8,%eax
.Ls4e: addl $8,%eax
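The comments added above describe what the fixup chain computes; the following C sketch (illustrative names and assumed register roles only, not a translation of the assembly) models the arithmetic: a source fault in the first half of the 64-byte block lands on .Ls1e and falls through eight "addl $8,%eax" steps (64 bytes uncopied), a fault in the second half lands on .Ls5e and falls through four (32 bytes uncopied), and whole blocks the loop never reached are then added on top.

    /*
     * Sketch of the "bytes left to copy" arithmetic that the fixups above
     * implement.  The parameters are illustrative: the copy loop moves one
     * 64-byte block per iteration, 'blocks_not_started' whole blocks remain,
     * and 'tail_bytes' is whatever the trailing small-copy part still had to
     * do.  'fault_label' is 1..8 for a source fault at .Ls1 .. .Ls8.
     */
    static unsigned int bytes_left_after_source_fault(unsigned int fault_label,
                                                      unsigned int blocks_not_started,
                                                      unsigned int tail_bytes)
    {
            /*
             * .Ls1-.Ls4 land on .Ls1e: none of this block is known to be copied.
             * .Ls5-.Ls8 land on .Ls5e: only the first 32 bytes are copied.
             */
            unsigned int landing = (fault_label <= 4) ? 1 : 5;

            /* Fall-through chain of "addl $8,%eax" from .Ls<landing>e to .Ls8e. */
            unsigned int uncopied_in_block = (9 - landing) * 8;    /* 64 or 32 */

            return uncopied_in_block + blocks_not_started * 64 + tail_bytes;
    }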
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
index 5196762b3b0e..9d3d1ab83763 100644
--- a/arch/x86/lib/copy_user_nocache_64.S
+++ b/arch/x86/lib/copy_user_nocache_64.S
@@ -145,19 +145,19 @@ ENTRY(__copy_user_nocache)
/* table sorted by exception address */
.section __ex_table,"a"
.align 8
- .quad .Ls1,.Ls1e
- .quad .Ls2,.Ls2e
- .quad .Ls3,.Ls3e
- .quad .Ls4,.Ls4e
- .quad .Ld1,.Ls1e
+ .quad .Ls1,.Ls1e /* .Ls[1-4] - 0 bytes copied */
+ .quad .Ls2,.Ls1e
+ .quad .Ls3,.Ls1e
+ .quad .Ls4,.Ls1e
+ .quad .Ld1,.Ls1e /* .Ld[1-4] - 0..24 bytes copied */
.quad .Ld2,.Ls2e
.quad .Ld3,.Ls3e
.quad .Ld4,.Ls4e
- .quad .Ls5,.Ls5e
- .quad .Ls6,.Ls6e
- .quad .Ls7,.Ls7e
- .quad .Ls8,.Ls8e
- .quad .Ld5,.Ls5e
+ .quad .Ls5,.Ls5e /* .Ls[5-8] - 32 bytes copied */
+ .quad .Ls6,.Ls5e
+ .quad .Ls7,.Ls5e
+ .quad .Ls8,.Ls5e
+ .quad .Ld5,.Ls5e /* .Ld[5-8] - 32..56 bytes copied */
.quad .Ld6,.Ls6e
.quad .Ld7,.Ls7e
.quad .Ld8,.Ls8e
@@ -172,11 +172,8 @@ ENTRY(__copy_user_nocache)
.quad .Le5,.Le_zero
.previous
- /* compute 64-offset for main loop. 8 bytes accuracy with error on the
- pessimistic side. this is gross. it would be better to fix the
- interface. */
/* eax: zero, ebx: 64 */
-.Ls1e: addl $8,%eax
+.Ls1e: addl $8,%eax /* eax: bytes left uncopied: Ls1e: 64 .. Ls8e: 8 */
.Ls2e: addl $8,%eax
.Ls3e: addl $8,%eax
.Ls4e: addl $8,%eax