summary | refs | log | tree | commit | diff | stats
path: root/arch/arm64/kernel/entry.S
diff options
context:
space:
mode:
author: Mark Rutland <mark.rutland@arm.com> 2021-06-07 11:46:17 +0200
committer: Will Deacon <will@kernel.org> 2021-06-07 12:35:55 +0200
commit: a5b43a87a7609d49ed4a453a2b99b6d36ab1e5d0 (patch)
tree: 0208c5b10efffbb52bea2fb5f678bdb23137b05f /arch/arm64/kernel/entry.S
parent: arm64: entry: improve bad_mode() (diff)
download: linux-a5b43a87a7609d49ed4a453a2b99b6d36ab1e5d0.tar.xz
download: linux-a5b43a87a7609d49ed4a453a2b99b6d36ab1e5d0.zip
arm64: entry: template the entry asm functions
Now that the majority of the exception triage logic has been converted to C, the entry assembly functions all have a uniform structure. Let's generate them all with an assembly macro to reduce the amount of code and to ensure they all remain in sync if we make changes in future. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Marc Zyngier <maz@kernel.org> Reviewed-by: Joey Gouly <joey.gouly@arm.com> Cc: James Morse <james.morse@arm.com> Cc: Will Deacon <will@kernel.org> Link: https://lore.kernel.org/r/20210607094624.34689-14-mark.rutland@arm.com Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'arch/arm64/kernel/entry.S')
-rw-r--r--  arch/arm64/kernel/entry.S | 113
1 file changed, 27 insertions, 86 deletions
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 54986d488983..b719ac26f7d1 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -604,102 +604,43 @@ SYM_CODE_START_LOCAL(el1_error_invalid)
inv_entry 1, BAD_ERROR
SYM_CODE_END(el1_error_invalid)
-/*
- * EL1 mode handlers.
- */
-SYM_CODE_START_LOCAL(el1_sync)
- kernel_entry 1
- mov x0, sp
- bl el1_sync_handler
- b ret_to_kernel
-SYM_CODE_END(el1_sync)
-
-SYM_CODE_START_LOCAL(el1_irq)
- kernel_entry 1
- mov x0, sp
- bl el1_irq_handler
- b ret_to_kernel
-SYM_CODE_END(el1_irq)
-
-SYM_CODE_START_LOCAL(el1_fiq)
- kernel_entry 1
- mov x0, sp
- bl el1_fiq_handler
- b ret_to_kernel
-SYM_CODE_END(el1_fiq)
-
-SYM_CODE_START_LOCAL(el1_error)
- kernel_entry 1
+ .macro entry_handler el:req, regsize:req, label:req
+SYM_CODE_START_LOCAL(el\el\()_\label)
+ kernel_entry \el, \regsize
mov x0, sp
- bl el1_error_handler
+ bl el\el\()_\label\()_handler
+ .if \el == 0
+ b ret_to_user
+ .else
b ret_to_kernel
-SYM_CODE_END(el1_error)
-
-SYM_CODE_START_LOCAL(ret_to_kernel)
- kernel_exit 1
-SYM_CODE_END(ret_to_kernel)
+ .endif
+SYM_CODE_END(el\el\()_\label)
+ .endm
/*
- * EL0 mode handlers.
+ * Early exception handlers
*/
-SYM_CODE_START_LOCAL(el0_sync)
- kernel_entry 0
- mov x0, sp
- bl el0_sync_handler
- b ret_to_user
-SYM_CODE_END(el0_sync)
+ entry_handler 1, 64, sync
+ entry_handler 1, 64, irq
+ entry_handler 1, 64, fiq
+ entry_handler 1, 64, error
-SYM_CODE_START_LOCAL(el0_irq)
- kernel_entry 0
- mov x0, sp
- bl el0_irq_handler
- b ret_to_user
-SYM_CODE_END(el0_irq)
-
-SYM_CODE_START_LOCAL(el0_fiq)
- kernel_entry 0
- mov x0, sp
- bl el0_fiq_handler
- b ret_to_user
-SYM_CODE_END(el0_fiq)
-
-SYM_CODE_START_LOCAL(el0_error)
- kernel_entry 0
- mov x0, sp
- bl el0_error_handler
- b ret_to_user
-SYM_CODE_END(el0_error)
+ entry_handler 0, 64, sync
+ entry_handler 0, 64, irq
+ entry_handler 0, 64, fiq
+ entry_handler 0, 64, error
#ifdef CONFIG_COMPAT
-SYM_CODE_START_LOCAL(el0_sync_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_sync_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_sync_compat)
-
-SYM_CODE_START_LOCAL(el0_irq_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_irq_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_irq_compat)
-
-SYM_CODE_START_LOCAL(el0_fiq_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_fiq_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_fiq_compat)
-
-SYM_CODE_START_LOCAL(el0_error_compat)
- kernel_entry 0, 32
- mov x0, sp
- bl el0_error_compat_handler
- b ret_to_user
-SYM_CODE_END(el0_error_compat)
+ entry_handler 0, 32, sync_compat
+ entry_handler 0, 32, irq_compat
+ entry_handler 0, 32, fiq_compat
+ entry_handler 0, 32, error_compat
#endif
+SYM_CODE_START_LOCAL(ret_to_kernel)
+ kernel_exit 1
+SYM_CODE_END(ret_to_kernel)
+
/*
* "slow" syscall return path.
*/