author		Linus Torvalds <torvalds@linux-foundation.org>	2022-03-24 01:35:57 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2022-03-24 01:35:57 +0100
commit		9c0e6a89b592f4c4e4d769dbc22d399ab0685159 (patch)
tree		54865d08ede844e868b3403670a9a91ad24bba82 /arch/arm/kernel
parent		Merge tag 'm68knommu-for-v5.18' of git://git.kernel.org/pub/scm/linux/kernel/... (diff)
parent		ARM: fix building NOMMU ARMv4/v5 kernels (diff)
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm
Pull ARM updates from Russell King:
 "Updates for IRQ stacks and virtually mapped stack support, and
  ftrace:

   - Support for IRQ and vmap'ed stacks

     This covers all the work related to implementing IRQ stacks and
     vmap'ed stacks for all 32-bit ARM systems that are currently
     supported by the Linux kernel, including RiscPC and Footbridge.
     It has been submitted for review in four different waves:

      - IRQ stacks support for v7 SMP systems [0]

      - vmap'ed stacks support for v7 SMP systems [1]

      - extending support for both IRQ stacks and vmap'ed stacks for
        all remaining configurations, including v6/v7 SMP multiplatform
        kernels and uniprocessor configurations including v7-M [2]

      - fixes and updates in [3]

   - ftrace fixes and cleanups

     Make all flavors of ftrace available on all builds, regardless of
     ISA choice, unwinder choice or compiler [4]:

      - use ADD not POP where possible

      - fix a couple of Thumb2 related issues

      - enable HAVE_FUNCTION_GRAPH_FP_TEST for robustness

      - enable the graph tracer with the EABI unwinder

      - avoid clobbering frame pointer registers to make Clang happy

   - Fixes for the above"

[0] https://lore.kernel.org/linux-arm-kernel/20211115084732.3704393-1-ardb@kernel.org/
[1] https://lore.kernel.org/linux-arm-kernel/20211122092816.2865873-1-ardb@kernel.org/
[2] https://lore.kernel.org/linux-arm-kernel/20211206164659.1495084-1-ardb@kernel.org/
[3] https://lore.kernel.org/linux-arm-kernel/20220124174744.1054712-1-ardb@kernel.org/
[4] https://lore.kernel.org/linux-arm-kernel/20220203082204.1176734-1-ardb@kernel.org/

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (62 commits)
  ARM: fix building NOMMU ARMv4/v5 kernels
  ARM: unwind: only permit stack switch when unwinding call_with_stack()
  ARM: Revert "unwind: dump exception stack from calling frame"
  ARM: entry: fix unwinder problems caused by IRQ stacks
  ARM: unwind: set frame.pc correctly for current-thread unwinding
  ARM: 9184/1: return_address: disable again for CONFIG_ARM_UNWIND=y
  ARM: 9183/1: unwind: avoid spurious warnings on bogus code addresses
  Revert "ARM: 9144/1: forbid ftrace with clang and thumb2_kernel"
  ARM: mach-bcm: disable ftrace in SMC invocation routines
  ARM: cacheflush: avoid clobbering the frame pointer
  ARM: kprobes: treat R7 as the frame pointer register in Thumb2 builds
  ARM: ftrace: enable the graph tracer with the EABI unwinder
  ARM: unwind: track location of LR value in stack frame
  ARM: ftrace: enable HAVE_FUNCTION_GRAPH_FP_TEST
  ARM: ftrace: avoid unnecessary literal loads
  ARM: ftrace: avoid redundant loads or clobbering IP
  ARM: ftrace: use trampolines to keep .init.text in branching range
  ARM: ftrace: use ADD not POP to counter PUSH at entry
  ARM: ftrace: ensure that ADR takes the Thumb bit into account
  ARM: make get_current() and __my_cpu_offset() __always_inline
  ...
Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--	arch/arm/kernel/Makefile	1
-rw-r--r--	arch/arm/kernel/asm-offsets.c	3
-rw-r--r--	arch/arm/kernel/entry-armv.S	183
-rw-r--r--	arch/arm/kernel/entry-common.S	16
-rw-r--r--	arch/arm/kernel/entry-ftrace.S	128
-rw-r--r--	arch/arm/kernel/entry-header.S	47
-rw-r--r--	arch/arm/kernel/entry-v7m.S	39
-rw-r--r--	arch/arm/kernel/ftrace.c	62
-rw-r--r--	arch/arm/kernel/head-common.S	4
-rw-r--r--	arch/arm/kernel/head.S	7
-rw-r--r--	arch/arm/kernel/irq.c	61
-rw-r--r--	arch/arm/kernel/module.c	90
-rw-r--r--	arch/arm/kernel/process.c	7
-rw-r--r--	arch/arm/kernel/return_address.c	3
-rw-r--r--	arch/arm/kernel/setup.c	8
-rw-r--r--	arch/arm/kernel/sleep.S	13
-rw-r--r--	arch/arm/kernel/smp.c	11
-rw-r--r--	arch/arm/kernel/stacktrace.c	3
-rw-r--r--	arch/arm/kernel/traps.c	93
-rw-r--r--	arch/arm/kernel/unwind.c	64
-rw-r--r--	arch/arm/kernel/vmlinux.lds.S	4
21 files changed, 664 insertions, 183 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 6ef3b535b7bf..553866751e1a 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -10,6 +10,7 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_insn.o = -pg
CFLAGS_REMOVE_patch.o = -pg
+CFLAGS_REMOVE_unwind.o = -pg
endif
CFLAGS_REMOVE_return_address.o = -pg
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 645845e4982a..2c8d76fd7c66 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -43,9 +43,6 @@ int main(void)
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-#ifndef CONFIG_THREAD_INFO_IN_TASK
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
-#endif
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_CPU_DOMAIN, offsetof(struct thread_info, cpu_domain));
DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context));
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index ee3f7a599181..06508698abb8 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -19,9 +19,6 @@
#include <asm/glue-df.h>
#include <asm/glue-pf.h>
#include <asm/vfpmacros.h>
-#ifndef CONFIG_GENERIC_IRQ_MULTI_HANDLER
-#include <mach/entry-macro.S>
-#endif
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
@@ -30,19 +27,35 @@
#include <asm/uaccess-asm.h>
#include "entry-header.S"
-#include <asm/entry-macro-multi.S>
#include <asm/probes.h>
/*
* Interrupt handling.
*/
- .macro irq_handler
-#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
- mov r0, sp
- bl generic_handle_arch_irq
-#else
- arch_irq_handler_default
+ .macro irq_handler, from_user:req
+ mov r1, sp
+ ldr_this_cpu r2, irq_stack_ptr, r2, r3
+ .if \from_user == 0
+ @
+ @ If we took the interrupt while running in the kernel, we may already
+ @ be using the IRQ stack, so revert to the original value in that case.
+ @
+ subs r3, r2, r1 @ SP above bottom of IRQ stack?
+ rsbscs r3, r3, #THREAD_SIZE @ ... and below the top?
+#ifdef CONFIG_VMAP_STACK
+ ldr_va r3, high_memory, cc @ End of the linear region
+ cmpcc r3, r1 @ Stack pointer was below it?
#endif
+ bcc 0f @ If not, switch to the IRQ stack
+ mov r0, r1
+ bl generic_handle_arch_irq
+ b 1f
+0:
+ .endif
+
+ mov_l r0, generic_handle_arch_irq
+ bl call_with_stack
+1:
.endm
.macro pabt_helper
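
For readers following the new IRQ stack handling: the checks at the top of the
irq_handler macro above amount to the C sketch below (illustrative only, not
kernel code; the function name is made up, and the CONFIG_VMAP_STACK special
case for SP values below high_memory is only noted in a comment).
irq_stack_ptr holds the top of this CPU's THREAD_SIZE-byte IRQ stack, as set
up in irq.c further down in this diff.

	static void handle_irq_on_proper_stack(struct pt_regs *regs, bool from_user)
	{
		unsigned long sp = current_stack_pointer;
		unsigned long top = (unsigned long)__this_cpu_read(irq_stack_ptr);
		/* true iff SP already lies within this CPU's IRQ stack */
		bool on_irq_stack = top - sp <= THREAD_SIZE;

		/*
		 * (The assembly additionally stays on the current stack when
		 * CONFIG_VMAP_STACK=y and SP points into the linear map, e.g.
		 * when running on the overflow stack.)
		 */
		if (!from_user && on_irq_stack)
			generic_handle_arch_irq(regs);
		else
			call_with_stack((void (*)(void *))generic_handle_arch_irq,
					regs, (void *)top);
	}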
@@ -140,27 +153,35 @@ ENDPROC(__und_invalid)
#define SPFIX(code...)
#endif
- .macro svc_entry, stack_hole=0, trace=1, uaccess=1
+ .macro svc_entry, stack_hole=0, trace=1, uaccess=1, overflow_check=1
UNWIND(.fnstart )
- UNWIND(.save {r0 - pc} )
- sub sp, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
+ sub sp, sp, #(SVC_REGS_SIZE + \stack_hole)
+ THUMB( add sp, r1 ) @ get SP in a GPR without
+ THUMB( sub r1, sp, r1 ) @ using a temp register
+
+ .if \overflow_check
+ UNWIND(.save {r0 - pc} )
+ do_overflow_check (SVC_REGS_SIZE + \stack_hole)
+ .endif
+
#ifdef CONFIG_THUMB2_KERNEL
- SPFIX( str r0, [sp] ) @ temporarily saved
- SPFIX( mov r0, sp )
- SPFIX( tst r0, #4 ) @ test original stack alignment
- SPFIX( ldr r0, [sp] ) @ restored
+ tst r1, #4 @ test stack pointer alignment
+ sub r1, sp, r1 @ restore original R1
+ sub sp, r1 @ restore original SP
#else
SPFIX( tst sp, #4 )
#endif
- SPFIX( subeq sp, sp, #4 )
- stmia sp, {r1 - r12}
+ SPFIX( subne sp, sp, #4 )
+
+ ARM( stmib sp, {r1 - r12} )
+ THUMB( stmia sp, {r0 - r12} ) @ No STMIB in Thumb-2
ldmia r0, {r3 - r5}
- add r7, sp, #S_SP - 4 @ here for interlock avoidance
+ add r7, sp, #S_SP @ here for interlock avoidance
mov r6, #-1 @ "" "" "" ""
- add r2, sp, #(SVC_REGS_SIZE + \stack_hole - 4)
- SPFIX( addeq r2, r2, #4 )
- str r3, [sp, #-4]! @ save the "real" r0 copied
+ add r2, sp, #(SVC_REGS_SIZE + \stack_hole)
+ SPFIX( addne r2, r2, #4 )
+ str r3, [sp] @ save the "real" r0 copied
@ from the exception stack
mov r3, lr
@@ -199,7 +220,7 @@ ENDPROC(__dabt_svc)
.align 5
__irq_svc:
svc_entry
- irq_handler
+ irq_handler from_user=0
#ifdef CONFIG_PREEMPTION
ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -426,7 +447,7 @@ ENDPROC(__dabt_usr)
__irq_usr:
usr_entry
kuser_cmpxchg_check
- irq_handler
+ irq_handler from_user=1
get_thread_info tsk
mov why, #0
b ret_to_user_from_irq
@@ -752,16 +773,17 @@ ENTRY(__switch_to)
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
switch_tls r1, r4, r5, r3, r7
-#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
- ldr r7, [r2, #TI_TASK]
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
+ !defined(CONFIG_STACKPROTECTOR_PER_TASK)
ldr r8, =__stack_chk_guard
.if (TSK_STACK_CANARY > IMM12_MASK)
- add r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
+ add r9, r2, #TSK_STACK_CANARY & ~IMM12_MASK
+ ldr r9, [r9, #TSK_STACK_CANARY & IMM12_MASK]
+ .else
+ ldr r9, [r2, #TSK_STACK_CANARY & IMM12_MASK]
.endif
- ldr r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
-#elif defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO)
- mov r7, r2 @ Preserve 'next'
#endif
+ mov r7, r2 @ Preserve 'next'
#ifdef CONFIG_CPU_USE_DOMAINS
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
@@ -770,19 +792,102 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
-#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
- str r7, [r8]
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) && \
+ !defined(CONFIG_STACKPROTECTOR_PER_TASK)
+ str r9, [r8]
#endif
- THUMB( mov ip, r4 )
mov r0, r5
- set_current r7
- ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
- THUMB( ldmia ip!, {r4 - sl, fp} ) @ Load all regs saved previously
- THUMB( ldr sp, [ip], #4 )
- THUMB( ldr pc, [ip] )
+#if !defined(CONFIG_THUMB2_KERNEL) && !defined(CONFIG_VMAP_STACK)
+ set_current r7, r8
+ ldmia r4, {r4 - sl, fp, sp, pc} @ Load all regs saved previously
+#else
+ mov r1, r7
+ ldmia r4, {r4 - sl, fp, ip, lr} @ Load all regs saved previously
+#ifdef CONFIG_VMAP_STACK
+ @
+ @ Do a dummy read from the new stack while running from the old one so
+ @ that we can rely on do_translation_fault() to fix up any stale PMD
+ @ entries covering the vmalloc region.
+ @
+ ldr r2, [ip]
+#endif
+
+ @ When CONFIG_THREAD_INFO_IN_TASK=n, the update of SP itself is what
+ @ effectuates the task switch, as that is what causes the observable
+ @ values of current and current_thread_info to change. When
+ @ CONFIG_THREAD_INFO_IN_TASK=y, setting current (and therefore
+ @ current_thread_info) is done explicitly, and the update of SP just
+ @ switches us to another stack, with few other side effects. In order
+ @ to prevent this distinction from causing any inconsistencies, let's
+ @ keep the 'set_current' call as close as we can to the update of SP.
+ set_current r1, r2
+ mov sp, ip
+ ret lr
+#endif
UNWIND(.fnend )
ENDPROC(__switch_to)
+#ifdef CONFIG_VMAP_STACK
+ .text
+ .align 2
+__bad_stack:
+ @
+ @ We've just detected an overflow. We need to load the address of this
+ @ CPU's overflow stack into the stack pointer register. We have only one
+ @ scratch register so let's use a sequence of ADDs including one
+ @ involving the PC, and decorate them with PC-relative group
+ @ relocations. As these are ARM only, switch to ARM mode first.
+ @
+ @ We enter here with IP clobbered and its value stashed on the mode
+ @ stack.
+ @
+THUMB( bx pc )
+THUMB( nop )
+THUMB( .arm )
+ ldr_this_cpu_armv6 ip, overflow_stack_ptr
+
+ str sp, [ip, #-4]! @ Preserve original SP value
+ mov sp, ip @ Switch to overflow stack
+ pop {ip} @ Original SP in IP
+
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+ mov ip, ip @ mov expected by unwinder
+ push {fp, ip, lr, pc} @ GCC flavor frame record
+#else
+ str ip, [sp, #-8]! @ store original SP
+ push {fpreg, lr} @ Clang flavor frame record
+#endif
+UNWIND( ldr ip, [r0, #4] ) @ load exception LR
+UNWIND( str ip, [sp, #12] ) @ store in the frame record
+ ldr ip, [r0, #12] @ reload IP
+
+ @ Store the original GPRs to the new stack.
+ svc_entry uaccess=0, overflow_check=0
+
+UNWIND( .save {sp, pc} )
+UNWIND( .save {fpreg, lr} )
+UNWIND( .setfp fpreg, sp )
+
+ ldr fpreg, [sp, #S_SP] @ Add our frame record
+ @ to the linked list
+#if defined(CONFIG_UNWINDER_FRAME_POINTER) && defined(CONFIG_CC_IS_GCC)
+ ldr r1, [fp, #4] @ reload SP at entry
+ add fp, fp, #12
+#else
+ ldr r1, [fpreg, #8]
+#endif
+ str r1, [sp, #S_SP] @ store in pt_regs
+
+ @ Stash the regs for handle_bad_stack
+ mov r0, sp
+
+ @ Time to die
+ bl handle_bad_stack
+ nop
+UNWIND( .fnend )
+ENDPROC(__bad_stack)
+#endif
+
__INIT
/*
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index dbc1913ee30b..90d40f4d56cf 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -16,12 +16,14 @@
.equ NR_syscalls, __NR_syscalls
-#ifdef CONFIG_NEED_RET_TO_USER
-#include <mach/entry-macro.S>
-#else
- .macro arch_ret_to_user, tmp1, tmp2
- .endm
+ .macro arch_ret_to_user, tmp
+#ifdef CONFIG_ARCH_IOP32X
+ mrc p15, 0, \tmp, c15, c1, 0
+ tst \tmp, #(1 << 6)
+ bicne \tmp, \tmp, #(1 << 6)
+ mcrne p15, 0, \tmp, c15, c1, 0 @ Disable cp6 access
#endif
+ .endm
#include "entry-header.S"
@@ -55,7 +57,7 @@ __ret_fast_syscall:
/* perform architecture specific actions before user return */
- arch_ret_to_user r1, lr
+ arch_ret_to_user r1
restore_user_regs fast = 1, offset = S_OFF
UNWIND(.fnend )
@@ -128,7 +130,7 @@ no_work_pending:
asm_trace_hardirqs_on save = 0
/* perform architecture specific actions before user return */
- arch_ret_to_user r1, lr
+ arch_ret_to_user r1
ct_user_enter save = 0
restore_user_regs fast = 0, offset = 0
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
index a74289ebc803..3e7bcaca5e07 100644
--- a/arch/arm/kernel/entry-ftrace.S
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -22,12 +22,9 @@
* mcount can be thought of as a function called in the middle of a subroutine
* call. As such, it needs to be transparent for both the caller and the
* callee: the original lr needs to be restored when leaving mcount, and no
- * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
- * clobber the ip register. This is OK because the ARM calling convention
- * allows it to be clobbered in subroutines and doesn't use it to hold
- * parameters.)
+ * registers should be clobbered.
*
- * When using dynamic ftrace, we patch out the mcount call by a "pop {lr}"
+ * When using dynamic ftrace, we patch out the mcount call by a "add sp, #4"
* instead of the __gnu_mcount_nc call (see arch/arm/kernel/ftrace.c).
*/
@@ -38,23 +35,20 @@
.macro __mcount suffix
mcount_enter
- ldr r0, =ftrace_trace_function
- ldr r2, [r0]
- adr r0, .Lftrace_stub
+ ldr_va r2, ftrace_trace_function
+ badr r0, .Lftrace_stub
cmp r0, r2
bne 1f
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- ldr r1, =ftrace_graph_return
- ldr r2, [r1]
- cmp r0, r2
- bne ftrace_graph_caller\suffix
-
- ldr r1, =ftrace_graph_entry
- ldr r2, [r1]
- ldr r0, =ftrace_graph_entry_stub
- cmp r0, r2
- bne ftrace_graph_caller\suffix
+ ldr_va r2, ftrace_graph_return
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
+
+ ldr_va r2, ftrace_graph_entry
+ mov_l r0, ftrace_graph_entry_stub
+ cmp r0, r2
+ bne ftrace_graph_caller\suffix
#endif
mcount_exit
@@ -70,29 +64,27 @@
.macro __ftrace_regs_caller
- sub sp, sp, #8 @ space for PC and CPSR OLD_R0,
+ str lr, [sp, #-8]! @ store LR as PC and make space for CPSR/OLD_R0,
@ OLD_R0 will overwrite previous LR
- add ip, sp, #12 @ move in IP the value of SP as it was
- @ before the push {lr} of the mcount mechanism
+ ldr lr, [sp, #8] @ get previous LR
- str lr, [sp, #0] @ store LR instead of PC
+ str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
- ldr lr, [sp, #8] @ get previous LR
+ str lr, [sp, #-4]! @ store previous LR as LR
- str r0, [sp, #8] @ write r0 as OLD_R0 over previous LR
+ add lr, sp, #16 @ move in LR the value of SP as it was
+ @ before the push {lr} of the mcount mechanism
- stmdb sp!, {ip, lr}
- stmdb sp!, {r0-r11, lr}
+ push {r0-r11, ip, lr}
@ stack content at this point:
@ 0 4 48 52 56 60 64 68 72
- @ R0 | R1 | ... | LR | SP + 4 | previous LR | LR | PSR | OLD_R0 |
+ @ R0 | R1 | ... | IP | SP + 4 | previous LR | LR | PSR | OLD_R0 |
- mov r3, sp @ struct pt_regs*
+ mov r3, sp @ struct pt_regs*
- ldr r2, =function_trace_op
- ldr r2, [r2] @ pointer to the current
+ ldr_va r2, function_trace_op @ pointer to the current
@ function tracing op
ldr r1, [sp, #S_LR] @ lr of instrumented func
@@ -108,35 +100,37 @@ ftrace_regs_call:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_regs_call
ftrace_graph_regs_call:
- mov r0, r0
+ARM( mov r0, r0 )
+THUMB( nop.w )
#endif
@ pop saved regs
- ldmia sp!, {r0-r12} @ restore r0 through r12
- ldr ip, [sp, #8] @ restore PC
- ldr lr, [sp, #4] @ restore LR
- ldr sp, [sp, #0] @ restore SP
- mov pc, ip @ return
+ pop {r0-r11, ip, lr} @ restore r0 through r12
+ ldr lr, [sp], #4 @ restore LR
+ ldr pc, [sp], #12
.endm
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.macro __ftrace_graph_regs_caller
- sub r0, fp, #4 @ lr of instrumented routine (parent)
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
+ sub r0, fp, #4 @ lr of instrumented routine (parent)
+#else
+ add r0, sp, #S_LR
+#endif
@ called from __ftrace_regs_caller
- ldr r1, [sp, #S_PC] @ instrumented routine (func)
+ ldr r1, [sp, #S_PC] @ instrumented routine (func)
mcount_adjust_addr r1, r1
- mov r2, fp @ frame pointer
+ mov r2, fpreg @ frame pointer
+ add r3, sp, #PT_REGS_SIZE
bl prepare_ftrace_return
@ pop registers saved in ftrace_regs_caller
- ldmia sp!, {r0-r12} @ restore r0 through r12
- ldr ip, [sp, #8] @ restore PC
- ldr lr, [sp, #4] @ restore LR
- ldr sp, [sp, #0] @ restore SP
- mov pc, ip @ return
+ pop {r0-r11, ip, lr} @ restore r0 through r12
+ ldr lr, [sp], #4 @ restore LR
+ ldr pc, [sp], #12
.endm
#endif
@@ -149,8 +143,7 @@ ftrace_graph_regs_call:
mcount_adjust_addr r0, lr @ instrumented function
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
- ldr r2, =function_trace_op
- ldr r2, [r2] @ pointer to the current
+ ldr_va r2, function_trace_op @ pointer to the current
@ function tracing op
mov r3, #0 @ regs is NULL
#endif
@@ -162,14 +155,19 @@ ftrace_call\suffix:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
- mov r0, r0
+ARM( mov r0, r0 )
+THUMB( nop.w )
#endif
mcount_exit
.endm
.macro __ftrace_graph_caller
+#ifdef CONFIG_UNWINDER_FRAME_POINTER
sub r0, fp, #4 @ &lr of instrumented routine (&parent)
+#else
+ add r0, sp, #20
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE
@ called from __ftrace_caller, saved in mcount_enter
ldr r1, [sp, #16] @ instrumented routine (func)
@@ -178,7 +176,8 @@ ftrace_graph_call\suffix:
@ called from __mcount, untouched in lr
mcount_adjust_addr r1, lr @ instrumented routine (func)
#endif
- mov r2, fp @ frame pointer
+ mov r2, fpreg @ frame pointer
+ add r3, sp, #24
bl prepare_ftrace_return
mcount_exit
.endm
@@ -202,16 +201,17 @@ ftrace_graph_call\suffix:
.endm
.macro mcount_exit
- ldmia sp!, {r0-r3, ip, lr}
- ret ip
+ ldmia sp!, {r0-r3}
+ ldr lr, [sp, #4]
+ ldr pc, [sp], #8
.endm
ENTRY(__gnu_mcount_nc)
UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
- mov ip, lr
- ldmia sp!, {lr}
- ret ip
+ push {lr}
+ ldr lr, [sp, #4]
+ ldr pc, [sp], #8
#else
__mcount
#endif
@@ -256,17 +256,33 @@ ENDPROC(ftrace_graph_regs_caller)
.purgem mcount_exit
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
- .globl return_to_handler
-return_to_handler:
+ENTRY(return_to_handler)
stmdb sp!, {r0-r3}
- mov r0, fp @ frame pointer
+ add r0, sp, #16 @ sp at exit of instrumented routine
bl ftrace_return_to_handler
mov lr, r0 @ r0 has real ret addr
ldmia sp!, {r0-r3}
ret lr
+ENDPROC(return_to_handler)
#endif
ENTRY(ftrace_stub)
.Lftrace_stub:
ret lr
ENDPROC(ftrace_stub)
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ __INIT
+
+ .macro init_tramp, dst:req
+ENTRY(\dst\()_from_init)
+ ldr pc, =\dst
+ENDPROC(\dst\()_from_init)
+ .endm
+
+ init_tramp ftrace_caller
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ init_tramp ftrace_regs_caller
+#endif
+#endif
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index ae24dd54e9ef..9a1dc142f782 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -292,12 +292,18 @@
.macro restore_user_regs, fast = 0, offset = 0
-#if defined(CONFIG_CPU_32v6K) && !defined(CONFIG_CPU_V6)
+#if defined(CONFIG_CPU_32v6K) && \
+ (!defined(CONFIG_CPU_V6) || defined(CONFIG_SMP))
+#ifdef CONFIG_CPU_V6
+ALT_SMP(nop)
+ALT_UP_B(.L1_\@)
+#endif
@ The TLS register update is deferred until return to user space so we
@ can use it for other things while running in the kernel
- get_thread_info r1
+ mrc p15, 0, r1, c13, c0, 3 @ get current_thread_info pointer
ldr r1, [r1, #TI_TP_VALUE]
mcr p15, 0, r1, c13, c0, 3 @ set TLS register
+.L1_\@:
#endif
uaccess_enable r1, isb=0
@@ -423,3 +429,40 @@ scno .req r7 @ syscall number
tbl .req r8 @ syscall table pointer
why .req r8 @ Linux syscall (!= 0)
tsk .req r9 @ current thread_info
+
+ .macro do_overflow_check, frame_size:req
+#ifdef CONFIG_VMAP_STACK
+ @
+ @ Test whether the SP has overflowed. Task and IRQ stacks are aligned
+ @ so that SP & BIT(THREAD_SIZE_ORDER + PAGE_SHIFT) should always be
+ @ zero.
+ @
+ARM( tst sp, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT) )
+THUMB( tst r1, #1 << (THREAD_SIZE_ORDER + PAGE_SHIFT) )
+THUMB( it ne )
+ bne .Lstack_overflow_check\@
+
+ .pushsection .text
+.Lstack_overflow_check\@:
+ @
+ @ The stack pointer is not pointing to a valid vmap'ed stack, but it
+ @ may be pointing into the linear map instead, which may happen if we
+ @ are already running from the overflow stack. We cannot detect overflow
+ @ in such cases so just carry on.
+ @
+ str ip, [r0, #12] @ Stash IP on the mode stack
+ ldr_va ip, high_memory @ Start of VMALLOC space
+ARM( cmp sp, ip ) @ SP in vmalloc space?
+THUMB( cmp r1, ip )
+THUMB( itt lo )
+ ldrlo ip, [r0, #12] @ Restore IP
+ blo .Lout\@ @ Carry on
+
+THUMB( sub r1, sp, r1 ) @ Restore original R1
+THUMB( sub sp, r1 ) @ Restore original SP
+ add sp, sp, #\frame_size @ Undo svc_entry's SP change
+ b __bad_stack @ Handle VMAP stack overflow
+ .popsection
+.Lout\@:
+#endif
+ .endm
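
A worked example of the alignment invariant that do_overflow_check relies on
(the numbers are made up for illustration): assume 4 KiB pages and
THREAD_SIZE_ORDER == 1, so THREAD_SIZE is 8 KiB and the tested bit is bit 13,
and assume the stack was allocated with the 2 x THREAD_SIZE alignment implied
by the comment above.

	/*
	 * Stack spanning 0xf0804000 .. 0xf0805fff (base is 16 KiB aligned):
	 *
	 *   0xf0805ff0 & BIT(13) == 0    valid SP near the top of the stack
	 *   0xf0804010 & BIT(13) == 0    valid SP near the bottom of the stack
	 *   0xf0803fd0 & BIT(13) != 0    SP dropped below the base into the
	 *                                guard area: the single TST in the
	 *                                overflow check fires
	 */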
diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
index 7bde93c10962..de8a60363c85 100644
--- a/arch/arm/kernel/entry-v7m.S
+++ b/arch/arm/kernel/entry-v7m.S
@@ -39,16 +39,25 @@ __irq_entry:
@
@ Invoke the IRQ handler
@
- mrs r0, ipsr
- ldr r1, =V7M_xPSR_EXCEPTIONNO
- and r0, r1
- sub r0, #16
- mov r1, sp
- stmdb sp!, {lr}
- @ routine called with r0 = irq number, r1 = struct pt_regs *
- bl nvic_handle_irq
-
- pop {lr}
+ mov r0, sp
+ ldr_this_cpu sp, irq_stack_ptr, r1, r2
+
+ @
+ @ If we took the interrupt while running in the kernel, we may already
+ @ be using the IRQ stack, so revert to the original value in that case.
+ @
+ subs r2, sp, r0 @ SP above bottom of IRQ stack?
+ rsbscs r2, r2, #THREAD_SIZE @ ... and below the top?
+ movcs sp, r0
+
+ push {r0, lr} @ preserve LR and original SP
+
+ @ routine called with r0 = struct pt_regs *
+ bl generic_handle_arch_irq
+
+ pop {r0, lr}
+ mov sp, r0
+
@
@ Check for any pending work if returning to user
@
@@ -101,15 +110,17 @@ ENTRY(__switch_to)
str sp, [ip], #4
str lr, [ip], #4
mov r5, r0
+ mov r6, r2 @ Preserve 'next'
add r4, r2, #TI_CPU_SAVE
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
- mov ip, r4
mov r0, r5
- ldmia ip!, {r4 - r11} @ Load all regs saved previously
- ldr sp, [ip]
- ldr pc, [ip, #4]!
+ mov r1, r6
+ ldmia r4, {r4 - r12, lr} @ Load all regs saved previously
+ set_current r1, r2
+ mov sp, ip
+ bx lr
.fnend
ENDPROC(__switch_to)
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index a006585e1c09..83cc068586bc 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -22,12 +22,24 @@
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/set_memory.h>
+#include <asm/stacktrace.h>
#include <asm/patch.h>
+/*
+ * The compiler emitted profiling hook consists of
+ *
+ * PUSH {LR}
+ * BL __gnu_mcount_nc
+ *
+ * To turn this combined sequence into a NOP, we need to restore the value of
+ * SP before the PUSH. Let's use an ADD rather than a POP into LR, as LR is not
+ * modified anyway, and reloading LR from memory is highly likely to be less
+ * efficient.
+ */
#ifdef CONFIG_THUMB2_KERNEL
-#define NOP 0xf85deb04 /* pop.w {lr} */
+#define NOP 0xf10d0d04 /* add.w sp, sp, #4 */
#else
-#define NOP 0xe8bd4000 /* pop {lr} */
+#define NOP 0xe28dd004 /* add sp, sp, #4 */
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -51,9 +63,20 @@ static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
return NOP;
}
-static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
+void ftrace_caller_from_init(void);
+void ftrace_regs_caller_from_init(void);
+
+static unsigned long __ref adjust_address(struct dyn_ftrace *rec,
+ unsigned long addr)
{
- return addr;
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE) ||
+ system_state >= SYSTEM_FREEING_INITMEM ||
+ likely(!is_kernel_inittext(rec->ip)))
+ return addr;
+ if (!IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS) ||
+ addr == (unsigned long)&ftrace_caller)
+ return (unsigned long)&ftrace_caller_from_init;
+ return (unsigned long)&ftrace_regs_caller_from_init;
}
int ftrace_arch_code_modify_prepare(void)
@@ -189,15 +212,23 @@ int ftrace_make_nop(struct module *mod,
#endif
new = ftrace_nop_replace(rec);
- ret = ftrace_modify_code(ip, old, new, true);
+ /*
+ * Locations in .init.text may call __gnu_mcount_nc via a linker
+ * emitted veneer if they are too far away from its implementation, and
+ * so validation may fail spuriously in such cases. Let's work around
+ * this by omitting those from validation.
+ */
+ ret = ftrace_modify_code(ip, old, new, !is_kernel_inittext(ip));
return ret;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+asmlinkage
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
- unsigned long frame_pointer)
+ unsigned long frame_pointer,
+ unsigned long stack_pointer)
{
unsigned long return_hooker = (unsigned long) &return_to_handler;
unsigned long old;
@@ -205,6 +236,23 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
+ if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER)) {
+ /* FP points one word below parent's top of stack */
+ frame_pointer += 4;
+ } else {
+ struct stackframe frame = {
+ .fp = frame_pointer,
+ .sp = stack_pointer,
+ .lr = self_addr,
+ .pc = self_addr,
+ };
+ if (unwind_frame(&frame) < 0)
+ return;
+ if (frame.lr != self_addr)
+ parent = frame.lr_addr;
+ frame_pointer = frame.sp;
+ }
+
old = *parent;
*parent = return_hooker;
@@ -225,7 +273,7 @@ static int __ftrace_modify_caller(unsigned long *callsite,
unsigned long caller_fn = (unsigned long) func;
unsigned long pc = (unsigned long) callsite;
unsigned long branch = arm_gen_branch(pc, caller_fn);
- unsigned long nop = 0xe1a00000; /* mov r0, r0 */
+ unsigned long nop = arm_gen_nop();
unsigned long old = enable ? nop : branch;
unsigned long new = enable ? branch : nop;
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index da18e0a17dc2..42cae73fcc19 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -105,10 +105,8 @@ __mmap_switched:
mov r1, #0
bl __memset @ clear .bss
-#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
adr_l r0, init_task @ get swapper task_struct
- set_current r0
-#endif
+ set_current r0, r1
ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c04dd94630c7..500612d3da2e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -424,6 +424,13 @@ ENDPROC(secondary_startup)
ENDPROC(secondary_startup_arm)
ENTRY(__secondary_switched)
+#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
+ @ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
+ @ as the ID map does not cover the vmalloc region.
+ mrc p15, 0, ip, c2, c0, 1 @ read TTBR1
+ mcr p15, 0, ip, c2, c0, 0 @ set TTBR0
+ instr_sync
+#endif
adr_l r7, secondary_data + 12 @ get secondary_data.stack
ldr sp, [r7]
ldr r0, [r7, #4] @ get secondary_data.task
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index b79975bd988c..5c6f8d11a3ce 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -36,13 +36,53 @@
#include <asm/hardware/cache-l2x0.h>
#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>
+#include <asm/softirq_stack.h>
#include <asm/exception.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
+#include "reboot.h"
+
unsigned long irq_err_count;
+#ifdef CONFIG_IRQSTACKS
+
+asmlinkage DEFINE_PER_CPU_READ_MOSTLY(u8 *, irq_stack_ptr);
+
+static void __init init_irq_stacks(void)
+{
+ u8 *stack;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (!IS_ENABLED(CONFIG_VMAP_STACK))
+ stack = (u8 *)__get_free_pages(GFP_KERNEL,
+ THREAD_SIZE_ORDER);
+ else
+ stack = __vmalloc_node(THREAD_SIZE, THREAD_ALIGN,
+ THREADINFO_GFP, NUMA_NO_NODE,
+ __builtin_return_address(0));
+
+ if (WARN_ON(!stack))
+ break;
+ per_cpu(irq_stack_ptr, cpu) = &stack[THREAD_SIZE];
+ }
+}
+
+static void ____do_softirq(void *arg)
+{
+ __do_softirq();
+}
+
+void do_softirq_own_stack(void)
+{
+ call_with_stack(____do_softirq, NULL,
+ __this_cpu_read(irq_stack_ptr));
+}
+
+#endif
+
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
@@ -80,27 +120,14 @@ void handle_IRQ(unsigned int irq, struct pt_regs *regs)
ack_bad_irq(irq);
}
-/*
- * asm_do_IRQ is the interface to be used from assembly code.
- */
-asmlinkage void __exception_irq_entry
-asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
- struct pt_regs *old_regs;
-
- irq_enter();
- old_regs = set_irq_regs(regs);
-
- handle_IRQ(irq, regs);
-
- set_irq_regs(old_regs);
- irq_exit();
-}
-
void __init init_IRQ(void)
{
int ret;
+#ifdef CONFIG_IRQSTACKS
+ init_irq_stacks();
+#endif
+
if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
irqchip_init();
else
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index beac45e89ba6..549abcedf795 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -68,6 +68,44 @@ bool module_exit_section(const char *name)
strstarts(name, ".ARM.exidx.exit");
}
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
+/*
+ * This implements the partitioning algorithm for group relocations as
+ * documented in the ARM AArch32 ELF psABI (IHI 0044).
+ *
+ * A single PC-relative symbol reference is divided in up to 3 add or subtract
+ * operations, where the final one could be incorporated into a load/store
+ * instruction with immediate offset. E.g.,
+ *
+ * ADD Rd, PC, #... or ADD Rd, PC, #...
+ * ADD Rd, Rd, #... ADD Rd, Rd, #...
+ * LDR Rd, [Rd, #...] ADD Rd, Rd, #...
+ *
+ * The latter has a guaranteed range of only 16 MiB (3x8 == 24 bits), so it is
+ * of limited use in the kernel. However, the ADD/ADD/LDR combo has a range of
+ * -/+ 256 MiB, (2x8 + 12 == 28 bits), which means it has sufficient range for
+ * any in-kernel symbol reference (unless module PLTs are being used).
+ *
+ * The main advantage of this approach over the typical pattern using a literal
+ * load is that literal loads may miss in the D-cache, and generally lead to
+ * lower cache efficiency for variables that are referenced often from many
+ * different places in the code.
+ */
+static u32 get_group_rem(u32 group, u32 *offset)
+{
+ u32 val = *offset;
+ u32 shift;
+ do {
+ shift = val ? (31 - __fls(val)) & ~1 : 32;
+ *offset = val;
+ if (!val)
+ break;
+ val &= 0xffffff >> shift;
+ } while (group--);
+ return shift;
+}
+#endif
+
int
apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned int relindex, struct module *module)
@@ -82,6 +120,9 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
unsigned long loc;
Elf32_Sym *sym;
const char *symname;
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
+ u32 shift, group = 1;
+#endif
s32 offset;
u32 tmp;
#ifdef CONFIG_THUMB2_KERNEL
@@ -212,6 +253,55 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
*(u32 *)loc = __opcode_to_mem_arm(tmp);
break;
+#ifdef CONFIG_ARM_HAS_GROUP_RELOCS
+ case R_ARM_ALU_PC_G0_NC:
+ group = 0;
+ fallthrough;
+ case R_ARM_ALU_PC_G1_NC:
+ tmp = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = ror32(tmp & 0xff, (tmp & 0xf00) >> 7);
+ if (tmp & BIT(22))
+ offset = -offset;
+ offset += sym->st_value - loc;
+ if (offset < 0) {
+ offset = -offset;
+ tmp = (tmp & ~BIT(23)) | BIT(22); // SUB opcode
+ } else {
+ tmp = (tmp & ~BIT(22)) | BIT(23); // ADD opcode
+ }
+
+ shift = get_group_rem(group, &offset);
+ if (shift < 24) {
+ offset >>= 24 - shift;
+ offset |= (shift + 8) << 7;
+ }
+ *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
+ break;
+
+ case R_ARM_LDR_PC_G2:
+ tmp = __mem_to_opcode_arm(*(u32 *)loc);
+ offset = tmp & 0xfff;
+ if (~tmp & BIT(23)) // U bit cleared?
+ offset = -offset;
+ offset += sym->st_value - loc;
+ if (offset < 0) {
+ offset = -offset;
+ tmp &= ~BIT(23); // clear U bit
+ } else {
+ tmp |= BIT(23); // set U bit
+ }
+ get_group_rem(2, &offset);
+
+ if (offset > 0xfff) {
+ pr_err("%s: section %u reloc %u sym '%s': relocation %u out of range (%#lx -> %#x)\n",
+ module->name, relindex, i, symname,
+ ELF32_R_TYPE(rel->r_info), loc,
+ sym->st_value);
+ return -ENOEXEC;
+ }
+ *(u32 *)loc = __opcode_to_mem_arm((tmp & ~0xfff) | offset);
+ break;
+#endif
#ifdef CONFIG_THUMB2_KERNEL
case R_ARM_THM_CALL:
case R_ARM_THM_JUMP24:
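
To make the group partitioning above concrete, here is a worked example (the
offset value is made up): for an ADD/ADD/LDR triplet covering a PC-relative
offset of 0x01234567, get_group_rem() peels off one 8-bit chunk per group,
each placed at an even bit position so it is encodable as an ARM modified
immediate, and leaves a 12-bit remainder for the load/store:

	/*
	 *   offset                        0x01234567
	 *   G0 chunk (first ADD)          0x01200000
	 *   G1 chunk (second ADD)         0x00034400
	 *   G2 remainder (LDR offset)     0x00000167   (fits in 12 bits)
	 *
	 *   0x01200000 + 0x00034400 + 0x00000167 == 0x01234567
	 */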
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index d47159f3791c..0617af11377f 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -36,7 +36,7 @@
#include "signal.h"
-#ifdef CONFIG_CURRENT_POINTER_IN_TPIDRURO
+#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
DEFINE_PER_CPU(struct task_struct *, __entry_task);
#endif
@@ -46,6 +46,11 @@ unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
+#ifndef CONFIG_CURRENT_POINTER_IN_TPIDRURO
+asmlinkage struct task_struct *__current;
+EXPORT_SYMBOL(__current);
+#endif
+
static const char *processor_modes[] __maybe_unused = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 00c11579406c..8aac1e10b117 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -41,7 +41,8 @@ void *return_address(unsigned int level)
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)return_address;
+here:
+ frame.pc = (unsigned long)&&here;
#ifdef CONFIG_KRETPROBES
frame.kr_cur = NULL;
frame.tsk = current;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 284a80c0b6e1..039feb7cd590 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -141,10 +141,10 @@ EXPORT_SYMBOL(outer_cache);
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
struct stack {
- u32 irq[3];
- u32 abt[3];
- u32 und[3];
- u32 fiq[3];
+ u32 irq[4];
+ u32 abt[4];
+ u32 und[4];
+ u32 fiq[4];
} ____cacheline_aligned;
#ifndef CONFIG_CPU_V7M
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 43077e11dafd..a86a1d4f3461 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -67,6 +67,12 @@ ENTRY(__cpu_suspend)
ldr r4, =cpu_suspend_size
#endif
mov r5, sp @ current virtual SP
+#ifdef CONFIG_VMAP_STACK
+ @ Run the suspend code from the overflow stack so we don't have to rely
+ @ on vmalloc-to-phys conversions anywhere in the arch suspend code.
+ @ The original SP value captured in R5 will be restored on the way out.
+ ldr_this_cpu sp, overflow_stack_ptr, r6, r7
+#endif
add r4, r4, #12 @ Space for pgd, virt sp, phys resume fn
sub sp, sp, r4 @ allocate CPU state on stack
ldr r3, =sleep_save_sp
@@ -113,6 +119,13 @@ ENTRY(cpu_resume_mmu)
ENDPROC(cpu_resume_mmu)
.popsection
cpu_resume_after_mmu:
+#if defined(CONFIG_VMAP_STACK) && !defined(CONFIG_ARM_LPAE)
+ @ Before using the vmap'ed stack, we have to switch to swapper_pg_dir
+ @ as the ID map does not cover the vmalloc region.
+ mrc p15, 0, ip, c2, c0, 1 @ read TTBR1
+ mcr p15, 0, ip, c2, c0, 0 @ set TTBR0
+ instr_sync
+#endif
bl cpu_init @ restore the und/abt/irq banked regs
mov r0, #0 @ return zero on success
ldmfd sp!, {r4 - r11, pc}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 97ee6b1567e9..73fc645fc4c7 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -400,6 +400,12 @@ static void smp_store_cpu_info(unsigned int cpuid)
check_cpu_icache_size(cpuid);
}
+static void set_current(struct task_struct *cur)
+{
+ /* Set TPIDRURO */
+ asm("mcr p15, 0, %0, c13, c0, 3" :: "r"(cur) : "memory");
+}
+
/*
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
@@ -628,11 +634,6 @@ static void ipi_complete(unsigned int cpu)
/*
* Main handler for inter-processor interrupts
*/
-asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
-{
- handle_IPI(ipinr, regs);
-}
-
static void do_handle_IPI(int ipinr)
{
unsigned int cpu = smp_processor_id();
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 75e905508f27..b5efecb3d730 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -160,7 +160,8 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)__save_stack_trace;
+here:
+ frame.pc = (unsigned long)&&here;
}
#ifdef CONFIG_KRETPROBES
frame.kr_cur = NULL;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cae4a748811f..b532039286a2 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -36,6 +36,7 @@
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
+#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>
@@ -61,13 +62,24 @@ static int __init user_debug_setup(char *str)
__setup("user_debug=", user_debug_setup);
#endif
-static void dump_mem(const char *, const char *, unsigned long, unsigned long);
-
void dump_backtrace_entry(unsigned long where, unsigned long from,
unsigned long frame, const char *loglvl)
{
unsigned long end = frame + 4 + sizeof(struct pt_regs);
+ if (IS_ENABLED(CONFIG_UNWINDER_FRAME_POINTER) &&
+ IS_ENABLED(CONFIG_CC_IS_GCC) &&
+ end > ALIGN(frame, THREAD_SIZE)) {
+ /*
+ * If we are walking past the end of the stack, it may be due
+ * to the fact that we are on an IRQ or overflow stack. In this
+ * case, we can load the address of the other stack from the
+ * frame record.
+ */
+ frame = ((unsigned long *)frame)[-2] - 4;
+ end = frame + 4 + sizeof(struct pt_regs);
+ }
+
#ifndef CONFIG_KALLSYMS
printk("%sFunction entered at [<%08lx>] from [<%08lx>]\n",
loglvl, where, from);
@@ -111,7 +123,8 @@ void dump_backtrace_stm(u32 *stack, u32 instruction, const char *loglvl)
static int verify_stack(unsigned long sp)
{
if (sp < PAGE_OFFSET ||
- (sp > (unsigned long)high_memory && high_memory != NULL))
+ (!IS_ENABLED(CONFIG_VMAP_STACK) &&
+ sp > (unsigned long)high_memory && high_memory != NULL))
return -EFAULT;
return 0;
@@ -121,8 +134,8 @@ static int verify_stack(unsigned long sp)
/*
* Dump out the contents of some memory nicely...
*/
-static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
- unsigned long top)
+void dump_mem(const char *lvl, const char *str, unsigned long bottom,
+ unsigned long top)
{
unsigned long first;
int i;
@@ -281,7 +294,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
- THREAD_SIZE + (unsigned long)task_stack_page(tsk));
+ ALIGN(regs->ARM_sp - THREAD_SIZE, THREAD_ALIGN)
+ + THREAD_SIZE);
dump_backtrace(regs, tsk, KERN_EMERG);
dump_instr(KERN_EMERG, regs);
}
@@ -880,3 +894,70 @@ void __init early_trap_init(void *vectors_base)
*/
}
#endif
+
+#ifdef CONFIG_VMAP_STACK
+
+DECLARE_PER_CPU(u8 *, irq_stack_ptr);
+
+asmlinkage DEFINE_PER_CPU(u8 *, overflow_stack_ptr);
+
+static int __init allocate_overflow_stacks(void)
+{
+ u8 *stack;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ stack = (u8 *)__get_free_page(GFP_KERNEL);
+ if (WARN_ON(!stack))
+ return -ENOMEM;
+ per_cpu(overflow_stack_ptr, cpu) = &stack[OVERFLOW_STACK_SIZE];
+ }
+ return 0;
+}
+early_initcall(allocate_overflow_stacks);
+
+asmlinkage void handle_bad_stack(struct pt_regs *regs)
+{
+ unsigned long tsk_stk = (unsigned long)current->stack;
+#ifdef CONFIG_IRQSTACKS
+ unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
+#endif
+ unsigned long ovf_stk = (unsigned long)this_cpu_read(overflow_stack_ptr);
+
+ console_verbose();
+ pr_emerg("Insufficient stack space to handle exception!");
+
+ pr_emerg("Task stack: [0x%08lx..0x%08lx]\n",
+ tsk_stk, tsk_stk + THREAD_SIZE);
+#ifdef CONFIG_IRQSTACKS
+ pr_emerg("IRQ stack: [0x%08lx..0x%08lx]\n",
+ irq_stk - THREAD_SIZE, irq_stk);
+#endif
+ pr_emerg("Overflow stack: [0x%08lx..0x%08lx]\n",
+ ovf_stk - OVERFLOW_STACK_SIZE, ovf_stk);
+
+ die("kernel stack overflow", regs, 0);
+}
+
+#ifndef CONFIG_ARM_LPAE
+/*
+ * Normally, we rely on the logic in do_translation_fault() to update stale PMD
+ * entries covering the vmalloc space in a task's page tables when it first
+ * accesses the region in question. Unfortunately, this is not sufficient when
+ * the task stack resides in the vmalloc region, as do_translation_fault() is a
+ * C function that needs a stack to run.
+ *
+ * So we need to ensure that these PMD entries are up to date *before* the MM
+ * switch. As we already have some logic in the MM switch path that takes care
+ * of this, let's trigger it by bumping the counter every time the core vmalloc
+ * code modifies a PMD entry in the vmalloc region. Use release semantics on
+ * the store so that other CPUs observing the counter's new value are
+ * guaranteed to see the updated page table entries as well.
+ */
+void arch_sync_kernel_mappings(unsigned long start, unsigned long end)
+{
+ if (start < VMALLOC_END && end > VMALLOC_START)
+ atomic_inc_return_release(&init_mm.context.vmalloc_seq);
+}
+#endif
+#endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 59fdf257bf8b..a37ea6c772cd 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -33,6 +33,8 @@
#include <asm/traps.h>
#include <asm/unwind.h>
+#include "reboot.h"
+
/* Dummy functions to avoid linker complaints */
void __aeabi_unwind_cpp_pr0(void)
{
@@ -53,6 +55,7 @@ struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
const unsigned long *insn; /* pointer to the current instructions word */
unsigned long sp_high; /* highest value of sp allowed */
+ unsigned long *lr_addr; /* address of LR value on the stack */
/*
* 1 : check for stack overflow for each register pop.
* 0 : save overhead if there is plenty of stack remaining.
@@ -237,6 +240,8 @@ static int unwind_pop_register(struct unwind_ctrl_block *ctrl,
* from being tracked by KASAN.
*/
ctrl->vrs[reg] = READ_ONCE_NOCHECK(*(*vsp));
+ if (reg == 14)
+ ctrl->lr_addr = *vsp;
(*vsp)++;
return URC_OK;
}
@@ -256,8 +261,9 @@ static int unwind_exec_pop_subset_r4_to_r13(struct unwind_ctrl_block *ctrl,
mask >>= 1;
reg++;
}
- if (!load_sp)
+ if (!load_sp) {
ctrl->vrs[SP] = (unsigned long)vsp;
+ }
return URC_OK;
}
@@ -313,9 +319,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if ((insn & 0xc0) == 0x00)
ctrl->vrs[SP] += ((insn & 0x3f) << 2) + 4;
- else if ((insn & 0xc0) == 0x40)
+ else if ((insn & 0xc0) == 0x40) {
ctrl->vrs[SP] -= ((insn & 0x3f) << 2) + 4;
- else if ((insn & 0xf0) == 0x80) {
+ } else if ((insn & 0xf0) == 0x80) {
unsigned long mask;
insn = (insn << 8) | unwind_get_byte(ctrl);
@@ -330,9 +336,9 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
if (ret)
goto error;
} else if ((insn & 0xf0) == 0x90 &&
- (insn & 0x0d) != 0x0d)
+ (insn & 0x0d) != 0x0d) {
ctrl->vrs[SP] = ctrl->vrs[insn & 0x0f];
- else if ((insn & 0xf0) == 0xa0) {
+ } else if ((insn & 0xf0) == 0xa0) {
ret = unwind_exec_pop_r4_to_rN(ctrl, insn);
if (ret)
goto error;
@@ -375,23 +381,22 @@ error:
*/
int unwind_frame(struct stackframe *frame)
{
- unsigned long low;
const struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
+ unsigned long sp_low;
/* store the highest address on the stack to avoid crossing it*/
- low = frame->sp;
- ctrl.sp_high = ALIGN(low, THREAD_SIZE);
+ sp_low = frame->sp;
+ ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN)
+ + THREAD_SIZE;
pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
frame->pc, frame->lr, frame->sp);
- if (!kernel_text_address(frame->pc))
- return -URC_FAILURE;
-
idx = unwind_find_idx(frame->pc);
if (!idx) {
- pr_warn("unwind: Index not found %08lx\n", frame->pc);
+ if (frame->pc && kernel_text_address(frame->pc))
+ pr_warn("unwind: Index not found %08lx\n", frame->pc);
return -URC_FAILURE;
}
@@ -403,7 +408,20 @@ int unwind_frame(struct stackframe *frame)
if (idx->insn == 1)
/* can't unwind */
return -URC_FAILURE;
- else if ((idx->insn & 0x80000000) == 0)
+ else if (frame->pc == prel31_to_addr(&idx->addr_offset)) {
+ /*
+ * Unwinding is tricky when we're halfway through the prologue,
+ * since the stack frame that the unwinder expects may not be
+ * fully set up yet. However, one thing we do know for sure is
+ * that if we are unwinding from the very first instruction of
+ * a function, we are still effectively in the stack frame of
+ * the caller, and the unwind info has no relevance yet.
+ */
+ if (frame->pc == frame->lr)
+ return -URC_FAILURE;
+ frame->pc = frame->lr;
+ return URC_OK;
+ } else if ((idx->insn & 0x80000000) == 0)
/* prel31 to the unwind table */
ctrl.insn = (unsigned long *)prel31_to_addr(&idx->insn);
else if ((idx->insn & 0xff000000) == 0x80000000)
@@ -430,6 +448,16 @@ int unwind_frame(struct stackframe *frame)
ctrl.check_each_pop = 0;
+ if (prel31_to_addr(&idx->addr_offset) == (u32)&call_with_stack) {
+ /*
+ * call_with_stack() is the only place where we permit SP to
+ * jump from one stack to another, and since we know it is
+ * guaranteed to happen, set up the SP bounds accordingly.
+ */
+ sp_low = frame->fp;
+ ctrl.sp_high = ALIGN(frame->fp, THREAD_SIZE);
+ }
+
while (ctrl.entries > 0) {
int urc;
if ((ctrl.sp_high - ctrl.vrs[SP]) < sizeof(ctrl.vrs))
@@ -437,7 +465,7 @@ int unwind_frame(struct stackframe *frame)
urc = unwind_exec_insn(&ctrl);
if (urc < 0)
return urc;
- if (ctrl.vrs[SP] < low || ctrl.vrs[SP] >= ctrl.sp_high)
+ if (ctrl.vrs[SP] < sp_low || ctrl.vrs[SP] > ctrl.sp_high)
return -URC_FAILURE;
}
@@ -452,6 +480,7 @@ int unwind_frame(struct stackframe *frame)
frame->sp = ctrl.vrs[SP];
frame->lr = ctrl.vrs[LR];
frame->pc = ctrl.vrs[PC];
+ frame->lr_addr = ctrl.lr_addr;
return URC_OK;
}
@@ -475,7 +504,12 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk,
frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_stack_pointer;
frame.lr = (unsigned long)__builtin_return_address(0);
- frame.pc = (unsigned long)unwind_backtrace;
+ /* We are saving the stack and execution state at this
+ * point, so we should ensure that frame.pc is within
+ * this block of code.
+ */
+here:
+ frame.pc = (unsigned long)&&here;
} else {
/* task blocked in __switch_to */
frame.fp = thread_saved_fp(tsk);
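
The reworked bound computation earlier in this file's diff,

	ctrl.sp_high = ALIGN(sp_low - THREAD_SIZE, THREAD_ALIGN) + THREAD_SIZE;

finds the top of the current stack without assuming that stacks are aligned
to their own size: they remain THREAD_SIZE bytes but may be aligned to
THREAD_ALIGN, in line with the overflow-check alignment described in
entry-header.S above. A worked example with made-up numbers (THREAD_SIZE ==
8 KiB, THREAD_ALIGN == 16 KiB, an 8 KiB stack based at 0xf0804000 so its top
is 0xf0806000):

	/*
	 *   sp_low                            = 0xf0805e20
	 *   sp_low - THREAD_SIZE              = 0xf0803e20
	 *   ALIGN(0xf0803e20, THREAD_ALIGN)   = 0xf0804000  (stack base)
	 *   base + THREAD_SIZE                = 0xf0806000  (stack top, sp_high)
	 */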
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index f02d617e3359..aa12b65a7fd6 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -138,12 +138,12 @@ SECTIONS
#ifdef CONFIG_STRICT_KERNEL_RWX
. = ALIGN(1<<SECTION_SHIFT);
#else
- . = ALIGN(THREAD_SIZE);
+ . = ALIGN(THREAD_ALIGN);
#endif
__init_end = .;
_sdata = .;
- RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_ALIGN)
_edata = .;
BSS_SECTION(0, 0, 0)