author | Ulrich Weigand <ulrich.weigand@de.ibm.com> | 2014-02-14 19:21:03 +0100 |
---|---|---|
committer | Anton Blanchard <anton@samba.org> | 2014-04-23 02:05:24 +0200 |
commit | 752a6422fec3c0f5f9d4ac43d92f5dd13e22fde4 (patch) | |
tree | 6ef91c1ad3c067345ce45bb6d7730ab9f38c9241 /arch/powerpc/lib/memcpy_power7.S | |
parent | powerpc: Fix ABIv2 issues with stack offsets in assembly code (diff) | |
powerpc: Fix unsafe accesses to parameter area in ELFv2
Some of the assembler files in lib/ make use of the fact that in the
ELFv1 ABI, the caller guarantees to provide stack space to save the
parameter registers r3 ... r10. This guarantee is no longer present
in ELFv2 for functions that have no variable argument list and no
more than 8 arguments.
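For illustration, a minimal sketch (assumed, not taken from this patch) of the ELFv1-only idiom being removed. STK_PARAM comes from the kernel's ppc_asm.h, where STK_PARAM(R3) expands to offset +48, the first doubleword of the caller-allocated parameter save area:

```asm
	/* ELFv1-only idiom: spill an incoming argument into the
	 * parameter save area, a POSITIVE offset from r1, i.e.
	 * memory inside the CALLER's stack frame.  An ELFv2 caller
	 * passing at most 8 non-vararg arguments may not allocate
	 * that area, so this store can corrupt the caller's stack.
	 */
	std	r3,STK_PARAM(R3)(r1)	/* save destination pointer */
	/* ... copy loop clobbers r3 ... */
	ld	r3,STK_PARAM(R3)(r1)	/* reload it as the return value */
```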
Change the affected routines to temporarily store registers in the
red zone and/or the top of their own stack frame (in the space
provided to save r31 ... r29, which is actually not used in these
routines).
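Concretely (a sketch under the ppc_asm.h definitions of the time, where STACKFRAMESIZE is 256 and STK_REG(R31) expands to 248): the new offsets land just below the stack pointer, inside the red zone, and once the routine allocates its own frame the very same bytes become that frame's r31/r30/r29 save slots:

```asm
	/* -STACKFRAMESIZE + STK_REG(R31) = -256 + 248 = -8(r1):
	 * below the stack pointer, in the red zone that the 64-bit
	 * PowerPC ABIs let a function use without opening a frame.
	 */
	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)

	stdu	r1,-STACKFRAMESIZE(r1)	/* now allocate our own frame */

	/* The slot written above is now the (unused) r31 save slot
	 * of our frame, so it can be reloaded at a positive offset:
	 */
	ld	r3,STK_REG(R31)(r1)
```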
In opal_query_takeover, simply always allocate a stack frame;
the routine is not performance critical.
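The opal_query_takeover change is outside this file's diff; as a rough, assumed sketch, the always-allocate pattern is the standard PPC64 prologue/epilogue (not the literal routine):

```asm
	mflr	r0
	std	r0,16(r1)		/* save LR in its ABI slot */
	stdu	r1,-STACKFRAMESIZE(r1)	/* unconditionally open a frame */
	/* ... spill arguments, make the call, restore ... */
	addi	r1,r1,STACKFRAMESIZE	/* close the frame */
	ld	r0,16(r1)
	mtlr	r0
	blr
```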
Signed-off-by: Ulrich Weigand <ulrich.weigand@de.ibm.com>
Signed-off-by: Anton Blanchard <anton@samba.org>
Diffstat (limited to 'arch/powerpc/lib/memcpy_power7.S')
-rw-r--r-- | arch/powerpc/lib/memcpy_power7.S | 20 |
1 file changed, 10 insertions, 10 deletions
```diff
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 87d8eeccd4b7..2ff5c142f87b 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -33,14 +33,14 @@ _GLOBAL(memcpy_power7)
 	cmpldi	r5,16
 	cmpldi	cr1,r5,4096
 
-	std	r3,STK_PARAM(R1)(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 
 	blt	.Lshort_copy
 	bgt	cr1,.Lvmx_copy
 #else
 	cmpldi	r5,16
 
-	std	r3,STK_PARAM(R1)(r1)
+	std	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 
 	blt	.Lshort_copy
 #endif
@@ -216,7 +216,7 @@ _GLOBAL(memcpy_power7)
 	lbz	r0,0(r4)
 	stb	r0,0(r3)
 
-15:	ld	r3,STK_PARAM(R3)(r1)
+15:	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	blr
 
 .Lunwind_stack_nonvmx_copy:
@@ -226,16 +226,16 @@ _GLOBAL(memcpy_power7)
 #ifdef CONFIG_ALTIVEC
 .Lvmx_copy:
 	mflr	r0
-	std	r4,STK_PARAM(R4)(r1)
-	std	r5,STK_PARAM(R5)(r1)
+	std	r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
+	std	r5,-STACKFRAMESIZE+STK_REG(R29)(r1)
 	std	r0,16(r1)
 	stdu	r1,-STACKFRAMESIZE(r1)
 	bl	enter_vmx_copy
 	cmpwi	cr1,r3,0
 	ld	r0,STACKFRAMESIZE+16(r1)
-	ld	r3,STACKFRAMESIZE+STK_PARAM(R3)(r1)
-	ld	r4,STACKFRAMESIZE+STK_PARAM(R4)(r1)
-	ld	r5,STACKFRAMESIZE+STK_PARAM(R5)(r1)
+	ld	r3,STK_REG(R31)(r1)
+	ld	r4,STK_REG(R30)(r1)
+	ld	r5,STK_REG(R29)(r1)
 	mtlr	r0
 
 	/*
@@ -447,7 +447,7 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,STK_PARAM(R3)(r1)
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	b	exit_vmx_copy		/* tail call optimise */
 
 .Lvmx_unaligned_copy:
@@ -651,6 +651,6 @@ _GLOBAL(memcpy_power7)
 	stb	r0,0(r3)
 
 15:	addi	r1,r1,STACKFRAMESIZE
-	ld	r3,STK_PARAM(R3)(r1)
+	ld	r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
 	b	exit_vmx_copy		/* tail call optimise */
 #endif /* CONFiG_ALTIVEC */
```