author     Chris Zankel <chris@zankel.net>    2012-11-17 01:16:20 +0100
committer  Chris Zankel <chris@zankel.net>    2012-12-19 06:10:20 +0100
commit     d1538c4675f37d0eeb34bd38bec798b3b29a5a7e
tree       4fa2d2c539825ad99f2f677cef686ea0051fbe9f
parent     xtensa: make DoubleExceptionVector literals fit the gap
xtensa: provide proper assembler function boundaries with ENDPROC()
Use ENDPROC() to mark the end of assembler functions.
Signed-off-by: Chris Zankel <chris@zankel.net>
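
For reference, ENTRY() and ENDPROC() are the assembler linkage macros from include/linux/linkage.h. The sketch below is an approximation of what the pair expands to (the exact definitions vary by architecture and kernel version), not a verbatim copy of the header:

	ENTRY(foo)			/* roughly: .globl foo; .align; foo:	    */
		entry	a1, 16		/* illustrative windowed-ABI body	    */
		retw
	ENDPROC(foo)			/* roughly: .type foo, @function;	    */
					/*          .size foo, . - foo  (via END()) */

Without the closing marker the assembler emits foo as an untyped symbol with no size, because ENTRY() only declares the label; with ENDPROC() the symbol carries a function type and an explicit size, which is what debuggers, profilers, and disassemblers use to tell where one assembler routine ends and the next begins.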
-rw-r--r--  arch/xtensa/kernel/align.S       |  1
-rw-r--r--  arch/xtensa/kernel/coprocessor.S | 22
-rw-r--r--  arch/xtensa/kernel/entry.S       | 30
-rw-r--r--  arch/xtensa/kernel/head.S        | 14
-rw-r--r--  arch/xtensa/kernel/vectors.S     | 51
-rw-r--r--  arch/xtensa/lib/checksum.S       |  5
-rw-r--r--  arch/xtensa/mm/misc.S            | 49
7 files changed, 147 insertions(+), 25 deletions(-)
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index 934ae58e2c79..39d2f597382d 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -450,6 +450,7 @@ ENTRY(fast_unaligned)
 1:	movi	a0, _user_exception
 	jx	a0
 
+ENDPROC(fast_unaligned)
 
 #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 54c3be313bfa..60bb13589249 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -43,10 +43,13 @@
 /* IO protection is currently unsupported. */
 
 ENTRY(fast_io_protect)
+
 	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
+ENDPROC(fast_io_protect)
+
 #if XTENSA_HAVE_COPROCESSORS
 
 /*
@@ -139,6 +142,7 @@ ENTRY(fast_io_protect)
  */
 
 ENTRY(coprocessor_save)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lsave_cp_regs_jump_table
@@ -150,7 +154,10 @@ ENTRY(coprocessor_save)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_save)
+
 ENTRY(coprocessor_load)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lload_cp_regs_jump_table
@@ -162,6 +169,8 @@ ENTRY(coprocessor_load)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_load)
+
 /*
  * coprocessor_flush(struct task_info*, index)
  *                             a2        a3
@@ -178,6 +187,7 @@ ENTRY(coprocessor_load)
 
 ENTRY(coprocessor_flush)
+
 	entry	a1, 32
 	s32i	a0, a1, 0
 	movi	a0, .Lsave_cp_regs_jump_table
@@ -191,6 +201,8 @@ ENTRY(coprocessor_flush)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_flush)
+
 ENTRY(coprocessor_restore)
 	entry	a1, 32
 	s32i	a0, a1, 0
@@ -205,6 +217,8 @@ ENTRY(coprocessor_restore)
 1:	l32i	a0, a1, 0
 	retw
 
+ENDPROC(coprocessor_restore)
+
 /*
  * Entry condition:
  *
@@ -220,10 +234,12 @@ ENTRY(coprocessor_restore)
  */
 
 ENTRY(fast_coprocessor_double)
+
 	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
+ENDPROC(fast_coprocessor_double)
 
 ENTRY(fast_coprocessor)
 
@@ -327,9 +343,15 @@ ENTRY(fast_coprocessor)
 
 	rfe
 
+ENDPROC(fast_coprocessor)
+
 	.data
+
 ENTRY(coprocessor_owner)
+
 	.fill	XCHAL_CP_MAX, 4, 0
+END(coprocessor_owner)
+
 #endif /* XTENSA_HAVE_COPROCESSORS */
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 90bfc1dbc13d..41ad9cfe9a2a 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -219,6 +219,7 @@ _user_exception:
 
 	j	common_exception
 
+ENDPROC(user_exception)
 
 /*
  * First-level exit handler for kernel exceptions
@@ -641,6 +642,8 @@ common_exception_exit:
 	l32i	a1, a1, PT_AREG1
 	rfde
 
+ENDPROC(kernel_exception)
+
 /*
  * Debug exception handler.
  *
@@ -701,6 +704,7 @@ ENTRY(debug_exception)
 	/* Debug exception while in exception mode. */
 1:	j	1b	// FIXME!!
 
+ENDPROC(debug_exception)
 
 /*
  * We get here in case of an unrecoverable exception.
@@ -751,6 +755,7 @@ ENTRY(unrecoverable_exception)
 
 1:	j	1b
 
+ENDPROC(unrecoverable_exception)
 
 /* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */
 
@@ -929,6 +934,7 @@ ENTRY(fast_alloca)
 	l32i	a2, a2, PT_AREG2
 	rfe
 
+ENDPROC(fast_alloca)
 
 /*
  * fast system calls.
@@ -966,6 +972,8 @@ ENTRY(fast_syscall_kernel)
 
 	j	kernel_exception
 
+ENDPROC(fast_syscall_kernel)
+
 ENTRY(fast_syscall_user)
 
 	/* Skip syscall. */
@@ -983,6 +991,8 @@ ENTRY(fast_syscall_user)
 
 	j	user_exception
 
+ENDPROC(fast_syscall_user)
+
 ENTRY(fast_syscall_unrecoverable)
 
 	/* Restore all states. */
@@ -995,7 +1005,7 @@ ENTRY(fast_syscall_unrecoverable)
 	movi	a0, unrecoverable_exception
 	callx0	a0
 
-
+ENDPROC(fast_syscall_unrecoverable)
 
 /*
  * sysxtensa syscall handler
@@ -1101,7 +1111,7 @@ CATCH
 	movi	a2, -EINVAL
 	rfe
 
-
+ENDPROC(fast_syscall_xtensa)
 
 /* fast_syscall_spill_registers.
@@ -1160,6 +1170,8 @@ ENTRY(fast_syscall_spill_registers)
 	movi	a2, 0
 	rfe
 
+ENDPROC(fast_syscall_spill_registers)
+
 /* Fixup handler.
  *
  * We get here if the spill routine causes an exception, e.g. tlb miss.
@@ -1464,6 +1476,8 @@ ENTRY(_spill_registers)
 	callx0	a0		# should not return
 1:	j	1b
 
+ENDPROC(_spill_registers)
+
 #ifdef CONFIG_MMU
 /*
  * We should never get here. Bail out!
@@ -1475,6 +1489,8 @@ ENTRY(fast_second_level_miss_double_kernel)
 	callx0	a0		# should not return
 1:	j	1b
 
+ENDPROC(fast_second_level_miss_double_kernel)
+
 /* First-level entry handler for user, kernel, and double 2nd-level
  * TLB miss exceptions.  Note that for now, user and kernel miss
  * exceptions share the same entry point and are handled identically.
@@ -1682,6 +1698,7 @@ ENTRY(fast_second_level_miss)
 	j	_kernel_exception
 1:	j	_user_exception
 
+ENDPROC(fast_second_level_miss)
 
 /*
  * StoreProhibitedException
@@ -1777,6 +1794,9 @@ ENTRY(fast_store_prohibited)
 	bbsi.l	a2, PS_UM_BIT, 1f
 	j	_kernel_exception
 1:	j	_user_exception
+
+ENDPROC(fast_store_prohibited)
+
 #endif /* CONFIG_MMU */
 
 /*
@@ -1787,6 +1807,7 @@ ENTRY(fast_store_prohibited)
  */
 
 ENTRY(system_call)
+
 	entry	a1, 32
 
 	/* regs->syscall = regs->areg[2] */
@@ -1831,6 +1852,8 @@ ENTRY(system_call)
 	callx4	a4
 	retw
 
+ENDPROC(system_call)
+
 
 /*
  * Task switch.
@@ -1899,6 +1922,7 @@ ENTRY(_switch_to)
 
 	retw
 
+ENDPROC(_switch_to)
 
 ENTRY(ret_from_fork)
 
@@ -1914,6 +1938,8 @@ ENTRY(ret_from_fork)
 
 	j	common_exception_return
 
+ENDPROC(ret_from_fork)
+
 /*
  * Kernel thread creation helper
  * On entry, set up by copy_thread: a2 = thread_fn, a3 = thread_fn arg
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index bdc50788f35e..417998c02108 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -47,16 +47,19 @@
  */
 
 	__HEAD
-	.globl _start
-_start:	_j	2f
+ENTRY(_start)
+
+	_j	2f
 
 	.align	4
 1:	.word	_startup
 2:	l32r	a0, 1b
 	jx	a0
 
+ENDPROC(_start)
+
 	.section .init.text, "ax"
-	.align 4
-_startup:
+
+ENTRY(_startup)
 
 	/* Disable interrupts and exceptions. */
 
@@ -230,6 +233,7 @@ _startup:
 should_never_return:
 	j	should_never_return
 
+ENDPROC(_startup)
 
 /*
  * BSS section
  */
@@ -239,6 +243,8 @@ __PAGE_ALIGNED_BSS
 #ifdef CONFIG_MMU
 ENTRY(swapper_pg_dir)
 	.fill	PAGE_SIZE, 1, 0
+END(swapper_pg_dir)
 #endif
 ENTRY(empty_zero_page)
 	.fill	PAGE_SIZE, 1, 0
+END(empty_zero_page)
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index 3a57c15f7942..9365ee5064d3 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -79,6 +79,8 @@ ENTRY(_UserExceptionVector)
 	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
 	jx	a0
 
+ENDPROC(_UserExceptionVector)
+
 /*
  * Kernel exception vector. (Exceptions with PS.UM == 0, PS.EXCM == 0)
  *
@@ -103,6 +105,7 @@ ENTRY(_KernelExceptionVector)
 	l32i	a0, a0, EXC_TABLE_FAST_KERNEL	# load handler address
 	jx	a0
 
+ENDPROC(_KernelExceptionVector)
 
 /*
  * Double exception vector (Exceptions with PS.EXCM == 1)
@@ -344,6 +347,7 @@ ENTRY(_DoubleExceptionVector)
 
 	.end literal_prefix
 
+ENDPROC(_DoubleExceptionVector)
 
 /*
  * Debug interrupt vector
@@ -355,9 +359,11 @@ ENTRY(_DoubleExceptionVector)
 	.section .DebugInterruptVector.text, "ax"
 
 ENTRY(_DebugInterruptVector)
+
 	xsr	a0, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
 	jx	a0
 
+ENDPROC(_DebugInterruptVector)
 
 /* Window overflow and underflow handlers.
@@ -369,38 +375,43 @@ ENTRY(_DebugInterruptVector)
  * we try to access any page that would cause a page fault early.
  */
 
+#define ENTRY_ALIGN64(name)	\
+	.globl name;		\
+	.align 64;		\
+	name:
+
 	.section		.WindowVectors.text, "ax"
 
 
 /* 4-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow4
-_WindowOverflow4:
+ENTRY_ALIGN64(_WindowOverflow4)
+
 	s32e	a0, a5, -16
 	s32e	a1, a5, -12
 	s32e	a2, a5, -8
 	s32e	a3, a5, -4
 	rfwo
 
+ENDPROC(_WindowOverflow4)
+
 
 /* 4-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow4
-_WindowUnderflow4:
+ENTRY_ALIGN64(_WindowUnderflow4)
+
 	l32e	a0, a5, -16
 	l32e	a1, a5, -12
 	l32e	a2, a5, -8
 	l32e	a3, a5, -4
 	rfwu
 
+ENDPROC(_WindowUnderflow4)
 
 /* 8-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow8
-_WindowOverflow8:
+ENTRY_ALIGN64(_WindowOverflow8)
+
 	s32e	a0, a9, -16
 	l32e	a0, a1, -12
 	s32e	a2, a9, -8
@@ -412,11 +423,12 @@ _WindowOverflow8:
 	s32e	a7, a0, -20
 	rfwo
 
+ENDPROC(_WindowOverflow8)
+
 
 /* 8-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow8
-_WindowUnderflow8:
+ENTRY_ALIGN64(_WindowUnderflow8)
+
 	l32e	a1, a9, -12
 	l32e	a0, a9, -16
 	l32e	a7, a1, -12
@@ -428,12 +440,12 @@ _WindowUnderflow8:
 	l32e	a7, a7, -20
 	rfwu
 
+ENDPROC(_WindowUnderflow8)
 
 /* 12-Register Window Overflow Vector (Handler) */
 
-	.align 64
-.global _WindowOverflow12
-_WindowOverflow12:
+ENTRY_ALIGN64(_WindowOverflow12)
+
 	s32e	a0, a13, -16
 	l32e	a0, a1, -12
 	s32e	a1, a13, -12
@@ -449,11 +461,12 @@ _WindowOverflow12:
 	s32e	a11, a0, -20
 	rfwo
 
+ENDPROC(_WindowOverflow12)
+
 
 /* 12-Register Window Underflow Vector (Handler) */
 
-	.align 64
-.global _WindowUnderflow12
-_WindowUnderflow12:
+ENTRY_ALIGN64(_WindowUnderflow12)
+
 	l32e	a1, a13, -12
 	l32e	a0, a13, -16
 	l32e	a11, a1, -12
@@ -469,6 +482,8 @@ _WindowUnderflow12:
 	l32e	a11, a11, -20
 	rfwu
 
+ENDPROC(_WindowUnderflow12)
+
 	.text
diff --git a/arch/xtensa/lib/checksum.S b/arch/xtensa/lib/checksum.S
index df397f932d0e..0470ca21a359 100644
--- a/arch/xtensa/lib/checksum.S
+++ b/arch/xtensa/lib/checksum.S
@@ -170,7 +170,7 @@ ENTRY(csum_partial)
 3:	j	5b		/* branch to handle the remaining byte */
 
-
+ENDPROC(csum_partial)
 
 /*
  * Copy from ds while checksumming, otherwise like csum_partial
@@ -211,6 +211,7 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst, int len,
  */
 
 ENTRY(csum_partial_copy_generic)
+
 	entry	sp, 32
 	mov	a12, a3
 	mov	a11, a4
@@ -367,6 +368,8 @@ DST(	s8i	a8, a3, 1	)
 6:	j	4b		/* process the possible trailing odd byte */
 
+ENDPROC(csum_partial_copy_generic)
+
 # Exception handler:
 .section .fixup, "ax"
diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S
index b048406d8756..7f7078f57c41 100644
--- a/arch/xtensa/mm/misc.S
+++ b/arch/xtensa/mm/misc.S
@@ -29,6 +29,7 @@
  */
 
 ENTRY(clear_page)
+
 	entry	a1, 16
 
 	movi	a3, 0
@@ -45,6 +46,8 @@ ENTRY(clear_page)
 
 	retw
 
+ENDPROC(clear_page)
+
 /*
  * copy_page and copy_user_page are the same for non-cache-aliased configs.
  *
@@ -53,6 +56,7 @@ ENTRY(clear_page)
  */
 
 ENTRY(copy_page)
+
 	entry	a1, 16
 
 	__loopi	a2, a4, PAGE_SIZE, 32
@@ -84,6 +88,8 @@ ENTRY(copy_page)
 
 	retw
 
+ENDPROC(copy_page)
+
 #ifdef CONFIG_MMU
 /*
  * If we have to deal with cache aliasing, we use temporary memory mappings
@@ -109,6 +115,7 @@ ENTRY(__tlbtemp_mapping_start)
  */
 
 ENTRY(clear_user_page)
+
 	entry	a1, 32
 
 	/* Mark page dirty and determine alias. */
@@ -164,6 +171,8 @@ ENTRY(clear_user_page)
 
 	retw
 
+ENDPROC(clear_user_page)
+
 /*
  * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page)
  *                    a2          a3            a4                  a5
@@ -262,6 +271,8 @@ ENTRY(copy_user_page)
 
 	retw
 
+ENDPROC(copy_user_page)
+
 #endif
 
 #if (DCACHE_WAY_SIZE > PAGE_SIZE)
@@ -272,6 +283,7 @@ ENTRY(copy_user_page)
  */
 
 ENTRY(__flush_invalidate_dcache_page_alias)
+
 	entry	sp, 16
 
 	movi	a7, 0			# required for exception handler
@@ -287,6 +299,7 @@ ENTRY(__flush_invalidate_dcache_page_alias)
 	retw
 
+ENDPROC(__flush_invalidate_dcache_page_alias)
 
 #endif
 
 ENTRY(__tlbtemp_mapping_itlb)
@@ -294,6 +307,7 @@ ENTRY(__tlbtemp_mapping_itlb)
 #if (ICACHE_WAY_SIZE > PAGE_SIZE)
 
 ENTRY(__invalidate_icache_page_alias)
+
 	entry	sp, 16
 
 	addi	a6, a3, (PAGE_KERNEL_EXEC | _PAGE_HW_WRITE)
@@ -307,11 +321,14 @@ ENTRY(__invalidate_icache_page_alias)
 	isync
 	retw
 
+ENDPROC(__invalidate_icache_page_alias)
+
 #endif
 
 /* End of special treatment in tlb miss exception */
 
 ENTRY(__tlbtemp_mapping_end)
+
 #endif /* CONFIG_MMU
 
 /*
@@ -319,6 +336,7 @@
  */
 
 ENTRY(__invalidate_icache_page)
+
 	entry	sp, 16
 
 	___invalidate_icache_page a2 a3
@@ -326,11 +344,14 @@
 
 	retw
 
+ENDPROC(__invalidate_icache_page)
+
 /*
  * void __invalidate_dcache_page(ulong start)
  */
 
 ENTRY(__invalidate_dcache_page)
+
 	entry	sp, 16
 
 	___invalidate_dcache_page a2 a3
@@ -338,11 +359,14 @@
 
 	retw
 
+ENDPROC(__invalidate_dcache_page)
+
 /*
  * void __flush_invalidate_dcache_page(ulong start)
  */
 
 ENTRY(__flush_invalidate_dcache_page)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_page a2 a3
@@ -350,11 +374,14 @@
 
 	dsync
 	retw
 
+ENDPROC(__flush_invalidate_dcache_page)
+
 /*
  * void __flush_dcache_page(ulong start)
  */
 
 ENTRY(__flush_dcache_page)
+
 	entry	sp, 16
 
 	___flush_dcache_page a2 a3
@@ -362,11 +389,14 @@
 
 	dsync
 	retw
 
+ENDPROC(__flush_dcache_page)
+
 /*
  * void __invalidate_icache_range(ulong start, ulong size)
  */
 
 ENTRY(__invalidate_icache_range)
+
 	entry	sp, 16
 
 	___invalidate_icache_range a2 a3 a4
@@ -374,11 +404,14 @@
 
 	retw
 
+ENDPROC(__invalidate_icache_range)
+
 /*
  * void __flush_invalidate_dcache_range(ulong start, ulong size)
  */
 
 ENTRY(__flush_invalidate_dcache_range)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_range a2 a3 a4
@@ -386,11 +419,14 @@
 
 	retw
 
+ENDPROC(__flush_invalidate_dcache_range)
+
 /*
  * void _flush_dcache_range(ulong start, ulong size)
  */
 
 ENTRY(__flush_dcache_range)
+
 	entry	sp, 16
 
 	___flush_dcache_range a2 a3 a4
@@ -398,22 +434,28 @@
 
 	retw
 
+ENDPROC(__flush_dcache_range)
+
 /*
  * void _invalidate_dcache_range(ulong start, ulong size)
  */
 
 ENTRY(__invalidate_dcache_range)
+
 	entry	sp, 16
 
 	___invalidate_dcache_range a2 a3 a4
 
 	retw
 
+ENDPROC(__invalidate_dcache_range)
+
 /*
  * void _invalidate_icache_all(void)
 */
 
 ENTRY(__invalidate_icache_all)
+
 	entry	sp, 16
 
 	___invalidate_icache_all a2 a3
@@ -421,11 +463,14 @@
 
 	retw
 
+ENDPROC(__invalidate_icache_all)
+
 /*
  * void _flush_invalidate_dcache_all(void)
 */
 
 ENTRY(__flush_invalidate_dcache_all)
+
 	entry	sp, 16
 
 	___flush_invalidate_dcache_all a2 a3
@@ -433,11 +478,14 @@
 
 	retw
 
+ENDPROC(__flush_invalidate_dcache_all)
+
 /*
  * void _invalidate_dcache_all(void)
 */
 
 ENTRY(__invalidate_dcache_all)
+
 	entry	sp, 16
 
 	___invalidate_dcache_all a2 a3
@@ -445,3 +493,4 @@
 
 	retw
 
+ENDPROC(__invalidate_dcache_all)
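
A quick way to see the effect of these markers is to inspect the symbol table of a built kernel. The command below is illustrative (any routine touched by this patch could be substituted for coprocessor_save):

	readelf -sW vmlinux | grep -w coprocessor_save

Before this change the symbol is reported as NOTYPE with size 0, since ENTRY() only declares the label; with ENDPROC() in place it shows up as FUNC with the routine's real byte size.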