path: root/arch/riscv/lib/uaccess.S
author	Akira Tsukamoto <akira.tsukamoto@gmail.com>	2021-07-20 10:50:52 +0200
committer	Palmer Dabbelt <palmerdabbelt@google.com>	2021-07-24 02:48:52 +0200
commit	6010d300f9f7e16d1bf327b4730bcd0c0886d9e6 (patch)
tree	cb9a13e091564601e86ccc04f4e5424a1399cb27 /arch/riscv/lib/uaccess.S
parent	riscv: stacktrace: pin the task's stack in get_wchan (diff)
riscv: __asm_copy_to-from_user: Fix: overrun copy
There were two causes of the overrun memory access.

First, the threshold size was too small: aligning dst requires one
SZREG and the unrolled word copy requires 8*SZREG, so the total has
to be at least 9*SZREG.

Second, inside the unrolled copy, adjusting t0 by -(8*SZREG-1) made
the loop run one extra iteration. The proper value is -(8*SZREG).

Signed-off-by: Akira Tsukamoto <akira.tsukamoto@gmail.com>
Fixes: ca6eaaa210de ("riscv: __asm_copy_to-from_user: Optimize unaligned memory access and pipeline stall")
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
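To illustrate the arithmetic, here is a minimal C model of the fixed fast
path (a hypothetical sketch, not kernel code: copy_model and the plain byte
copies stand in for the fixup-wrapped loads/stores, and the misaligned-src
shift_copy path is left out). The unrolled body executes once before the
first bound check, which is why the entry threshold must cover one full
8*SZREG pass plus up to one SZREG spent aligning dst:

#include <stddef.h>
#include <stdint.h>

#define SZREG 8	/* bytes per register on RV64 (4 on RV32) */

/* Hypothetical C model of the fixed __asm_copy_from_user fast path. */
static void copy_model(unsigned char *dst, const unsigned char *src, size_t n)
{
	unsigned char *end = dst + n;		/* t0 = a0 + a2 */

	/* Use byte copy only if too small: 9*SZREG covers one SZREG for
	 * aligning dst plus 8*SZREG for one unrolled pass. */
	if (n < 9 * SZREG)
		goto byte_copy_tail;

	/* Copy single bytes until dst is aligned to a word boundary. */
	while ((uintptr_t)dst & (SZREG - 1))
		*dst++ = *src++;

	/* Unrolled word copy.  The body executes before the test, matching
	 * the fall-through into label 2 and the bltu at the bottom, so at
	 * least 8*SZREG bytes must remain here (the threshold guarantees
	 * 9*SZREG - (SZREG-1) >= 8*SZREG left after alignment). */
	{
		unsigned char *t0 = end - 8 * SZREG;	/* not to overrun */

		do {
			for (size_t i = 0; i < 8 * SZREG; i++)
				dst[i] = src[i];
			dst += 8 * SZREG;
			src += 8 * SZREG;
		} while (dst < t0);			/* bltu a0, t0, 2b */
	}

byte_copy_tail:
	while (dst < end)
		*dst++ = *src++;
}

On RV64 (SZREG = 8) the byte-copy cutoff thus moves from 64 to 72 bytes;
any smaller request never reaches the unrolled loop.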
Diffstat
-rw-r--r--	arch/riscv/lib/uaccess.S	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index bceb0629e440..8bbeca89a93f 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -35,7 +35,7 @@ ENTRY(__asm_copy_from_user)
/*
* Use byte copy only if too small.
*/
- li a3, 8*SZREG /* size must be larger than size in word_copy */
+ li a3, 9*SZREG /* size must be larger than size in word_copy */
bltu a2, a3, .Lbyte_copy_tail
/*
@@ -75,7 +75,7 @@ ENTRY(__asm_copy_from_user)
* a3 - a1 & mask:(SZREG-1)
* t0 - end of aligned dst
*/
- addi t0, t0, -(8*SZREG-1) /* not to over run */
+ addi t0, t0, -(8*SZREG) /* not to over run */
2:
fixup REG_L a4, 0(a1), 10f
fixup REG_L a5, SZREG(a1), 10f
@@ -97,7 +97,7 @@ ENTRY(__asm_copy_from_user)
addi a1, a1, 8*SZREG
bltu a0, t0, 2b
- addi t0, t0, 8*SZREG-1 /* revert to original value */
+ addi t0, t0, 8*SZREG /* revert to original value */
j .Lbyte_copy_tail
.Lshift_copy:
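A quick sanity harness for the sketch above (again hypothetical; it assumes
the copy_model and SZREG definitions from that sketch) compares the model
against memcpy for sizes spanning the 9*SZREG threshold at every dst
alignment:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char src[512], dst[512], ref[512];

	for (size_t i = 0; i < sizeof(src); i++)
		src[i] = (unsigned char)i;

	/* n spans the 9*SZREG threshold; off covers every dst alignment. */
	for (size_t n = 0; n <= 256; n++) {
		for (size_t off = 0; off < SZREG; off++) {
			memset(dst, 0xaa, sizeof(dst));
			memset(ref, 0xaa, sizeof(ref));
			copy_model(dst + off, src, n);
			memcpy(ref + off, src, n);
			/* Full-buffer compare also catches stray writes
			 * outside [off, off + n). */
			if (memcmp(dst, ref, sizeof(dst)) != 0) {
				printf("mismatch: n=%zu off=%zu\n", n, off);
				return 1;
			}
		}
	}
	puts("copy_model matches memcpy for all tested sizes");
	return 0;
}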