| author | Max Filippov <jcmvbkbc@gmail.com> | 2019-05-13 05:28:25 +0200 |
|---|---|---|
| committer | Max Filippov <jcmvbkbc@gmail.com> | 2019-07-08 19:04:48 +0200 |
| commit | d6d5f19e21d98c0607ff029e4e2e508d4cdd1d5a (patch) | |
| tree | 1780d36ac99c16f8ed506948a020017e84a89a9f | /arch/xtensa/lib/memcopy.S |
| parent | xtensa: One function call less in bootmem_init() (diff) | |
| download | linux-d6d5f19e21d98c0607ff029e4e2e508d4cdd1d5a.tar.xz linux-d6d5f19e21d98c0607ff029e4e2e508d4cdd1d5a.zip | |
xtensa: abstract 'entry' and 'retw' in assembly code
Provide abi_entry, abi_entry_default, abi_ret and abi_ret_default macros
that allocate an aligned stack frame in both the windowed and call0 ABIs.
Provide the XTENSA_SPILL_STACK_RESERVE macro that specifies the stack
frame size required when register spilling is involved.
Replace all uses of 'entry' and 'retw' with the above macros.
This makes most of the xtensa assembly code ready for XEA3 and the call0 ABI.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
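
The abstraction described above can be pictured with a minimal sketch. This is not the kernel's actual asmmacro.h code: the call0 instruction sequences and the constant chosen for XTENSA_SPILL_STACK_RESERVE are illustrative assumptions; only the overall shape (one macro name, two ABI-specific expansions selected at build time) comes from the commit message.

```asm
/*
 * Illustrative sketch only -- not the kernel's exact definitions.
 * Windowed ABI: 'entry' rotates the register window and allocates the
 * frame, 'retw' undoes both.  Call0 ABI (assumed here): the frame is
 * managed explicitly on sp and a plain 'ret' returns.
 */
#if defined(__XTENSA_WINDOWED_ABI__)

#define XTENSA_SPILL_STACK_RESERVE	32	/* room to spill live windows (assumed value) */

	.macro	abi_entry frame_size
	entry	sp, \frame_size			/* hardware-managed frame */
	.endm

	.macro	abi_ret frame_size
	retw					/* frame freed by the window return */
	.endm

#else /* call0 ABI */

#define XTENSA_SPILL_STACK_RESERVE	0	/* no register windows to spill */

	.macro	abi_entry frame_size
	addi	sp, sp, -(\frame_size)		/* allocate the frame by hand */
	.endm

	.macro	abi_ret frame_size
	addi	sp, sp, (\frame_size)		/* free the frame, plain return */
	ret
	.endm

#endif

	/* _default variants cover the common "minimal frame" case,
	 * which is all the memcopy.S hunks below need. */
	.macro	abi_entry_default
	abi_entry	16
	.endm

	.macro	abi_ret_default
	abi_ret		16
	.endm
```

With something like this in place the diff itself is mostly mechanical: every `entry sp, 16` becomes `abi_entry_default` and every `retw` becomes `abi_ret_default`.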
Diffstat (limited to 'arch/xtensa/lib/memcopy.S')
| -rw-r--r-- | arch/xtensa/lib/memcopy.S | 38 |

1 file changed, 19 insertions, 19 deletions
```diff
diff --git a/arch/xtensa/lib/memcopy.S b/arch/xtensa/lib/memcopy.S
index efecfd7ed8cc..582d817979ed 100644
--- a/arch/xtensa/lib/memcopy.S
+++ b/arch/xtensa/lib/memcopy.S
@@ -79,7 +79,7 @@
 	bne	a3, a7, .Lnextbyte # continue loop if $a3:src != $a7:src_end
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lbytecopydone:
-	retw
+	abi_ret_default
 
 /*
  * Destination is unaligned
@@ -112,7 +112,7 @@
 ENTRY(__memcpy)
 WEAK(memcpy)
 
-	entry	sp, 16		# minimal stack frame
+	abi_entry_default
 	# a2/ dst, a3/ src, a4/ len
 	mov	a5, a2		# copy dst so that a2 is return value
 .Lcommon:
@@ -161,7 +161,7 @@ WEAK(memcpy)
 	bbsi.l	a4, 2, .L3
 	bbsi.l	a4, 1, .L4
 	bbsi.l	a4, 0, .L5
-	retw
+	abi_ret_default
 .L3:
 	# copy 4 bytes
 	l32i	a6, a3, 0
@@ -170,7 +170,7 @@
 	addi	a5, a5, 4
 	bbsi.l	a4, 1, .L4
 	bbsi.l	a4, 0, .L5
-	retw
+	abi_ret_default
 .L4:
 	# copy 2 bytes
 	l16ui	a6, a3, 0
@@ -178,12 +178,12 @@
 	s16i	a6, a5, 0
 	addi	a5, a5, 2
 	bbsi.l	a4, 0, .L5
-	retw
+	abi_ret_default
 .L5:
 	# copy 1 byte
 	l8ui	a6, a3, 0
 	s8i	a6, a5, 0
-	retw
+	abi_ret_default
 
 /*
  * Destination is aligned, Source is unaligned
@@ -255,7 +255,7 @@ WEAK(memcpy)
 #endif
 	bbsi.l	a4, 1, .L14
 	bbsi.l	a4, 0, .L15
-.Ldone:	retw
+.Ldone:	abi_ret_default
 .L14:
 	# copy 2 bytes
 	l8ui	a6, a3, 0
@@ -265,12 +265,12 @@
 	s8i	a7, a5, 1
 	addi	a5, a5, 2
 	bbsi.l	a4, 0, .L15
-	retw
+	abi_ret_default
 .L15:
 	# copy 1 byte
 	l8ui	a6, a3, 0
 	s8i	a6, a5, 0
-	retw
+	abi_ret_default
 
 ENDPROC(__memcpy)
 
@@ -280,7 +280,7 @@ ENDPROC(__memcpy)
 
 ENTRY(bcopy)
 
-	entry	sp, 16		# minimal stack frame
+	abi_entry_default
 	# a2=src, a3=dst, a4=len
 	mov	a5, a3
 	mov	a3, a2
@@ -346,7 +346,7 @@ ENDPROC(bcopy)
 			       # $a3:src != $a7:src_start
 #endif /* !XCHAL_HAVE_LOOPS */
 .Lbackbytecopydone:
-	retw
+	abi_ret_default
 
 /*
  * Destination is unaligned
@@ -380,7 +380,7 @@ ENDPROC(bcopy)
 ENTRY(__memmove)
 WEAK(memmove)
 
-	entry	sp, 16		# minimal stack frame
+	abi_entry_default
 	# a2/ dst, a3/ src, a4/ len
 	mov	a5, a2		# copy dst so that a2 is return value
 .Lmovecommon:
@@ -435,7 +435,7 @@ WEAK(memmove)
 	bbsi.l	a4, 2, .Lback3
 	bbsi.l	a4, 1, .Lback4
 	bbsi.l	a4, 0, .Lback5
-	retw
+	abi_ret_default
 .Lback3:
 	# copy 4 bytes
 	addi	a3, a3, -4
@@ -444,7 +444,7 @@
 	s32i	a6, a5, 0
 	bbsi.l	a4, 1, .Lback4
 	bbsi.l	a4, 0, .Lback5
-	retw
+	abi_ret_default
 .Lback4:
 	# copy 2 bytes
 	addi	a3, a3, -2
@@ -452,14 +452,14 @@
 	addi	a5, a5, -2
 	s16i	a6, a5, 0
 	bbsi.l	a4, 0, .Lback5
-	retw
+	abi_ret_default
 .Lback5:
 	# copy 1 byte
 	addi	a3, a3, -1
 	l8ui	a6, a3, 0
 	addi	a5, a5, -1
 	s8i	a6, a5, 0
-	retw
+	abi_ret_default
 
 /*
  * Destination is aligned, Source is unaligned
@@ -531,7 +531,7 @@ WEAK(memmove)
 	bbsi.l	a4, 1, .Lback14
 	bbsi.l	a4, 0, .Lback15
 .Lbackdone:
-	retw
+	abi_ret_default
 .Lback14:
 	# copy 2 bytes
 	addi	a3, a3, -2
@@ -541,13 +541,13 @@
 	s8i	a6, a5, 0
 	s8i	a7, a5, 1
 	bbsi.l	a4, 0, .Lback15
-	retw
+	abi_ret_default
 .Lback15:
 	# copy 1 byte
 	addi	a3, a3, -1
 	addi	a5, a5, -1
 	l8ui	a6, a3, 0
 	s8i	a6, a5, 0
-	retw
+	abi_ret_default
 
 ENDPROC(__memmove)
```
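The hunks above touch only routines that keep nothing on the stack, so only the `_default` variants appear. For a routine that does need locals, the commit message's `abi_entry`/`abi_ret` take an explicit frame size, and one plausible way to account for register spills is to fold in `XTENSA_SPILL_STACK_RESERVE`. The example below is hypothetical (the function name and 16-byte local area are made up), uses the sketch definitions shown earlier, and glosses over the frame alignment that the real macros guarantee.

```asm
	/* Hypothetical routine, not from this diff: 16 bytes of locals
	 * plus whatever the configured ABI reserves for register spills. */
ENTRY(example_spilling_func)

	abi_entry	16+XTENSA_SPILL_STACK_RESERVE
	/* ... body that may cause caller register windows to spill ... */
	abi_ret		16+XTENSA_SPILL_STACK_RESERVE

ENDPROC(example_spilling_func)
```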