From b344e24a8e8ceda83d1285d22e3e5baf4f5e42d3 Mon Sep 17 00:00:00 2001
From: Matt Fleming
Date: Sun, 16 Aug 2009 21:54:48 +0100
Subject: sh: unwinder: Introduce UNWINDER_BUG() and UNWINDER_BUG_ON()

We can't assume that if we enter the unwinder code while the unwinder is
already running, it must have faulted: two kernel threads can legitimately
invoke the unwinder at the same time and run simultaneously.

The previous approach used BUG() and BUG_ON() in the unwinder code to
signal that the current unwinder was incapable of unwinding the stack and
that the next available unwinder should be used instead. A better approach
is to explicitly invoke a trap handler to switch unwinders when the
current unwinder cannot continue.

Signed-off-by: Matt Fleming
---
 arch/sh/kernel/unwinder.c | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

(limited to 'arch/sh/kernel/unwinder.c')

diff --git a/arch/sh/kernel/unwinder.c b/arch/sh/kernel/unwinder.c
index 2b30fa28b440..b9c122abe251 100644
--- a/arch/sh/kernel/unwinder.c
+++ b/arch/sh/kernel/unwinder.c
@@ -53,8 +53,6 @@ static struct list_head unwinder_list = {
 
 static DEFINE_SPINLOCK(unwinder_lock);
 
-static atomic_t unwinder_running = ATOMIC_INIT(0);
-
 /**
  * select_unwinder - Select the best registered stack unwinder.
  *
@@ -122,6 +120,8 @@ int unwinder_register(struct unwinder *u)
 	return ret;
 }
 
+int unwinder_faulted = 0;
+
 /*
  * Unwind the call stack and pass information to the stacktrace_ops
  * functions. Also handle the case where we need to switch to a new
@@ -144,19 +144,40 @@ void unwind_stack(struct task_struct *task, struct pt_regs *regs,
 	 * Hopefully this will give us a semi-reliable stacktrace so we
 	 * can diagnose why curr_unwinder->dump() faulted.
	 */
-	if (atomic_inc_return(&unwinder_running) != 1) {
+	if (unwinder_faulted) {
 		spin_lock_irqsave(&unwinder_lock, flags);
 
-		if (!list_is_singular(&unwinder_list)) {
+		/* Make sure no one beat us to changing the unwinder */
+		if (unwinder_faulted && !list_is_singular(&unwinder_list)) {
 			list_del(&curr_unwinder->list);
 			curr_unwinder = select_unwinder();
+
+			unwinder_faulted = 0;
 		}
 
 		spin_unlock_irqrestore(&unwinder_lock, flags);
-		atomic_dec(&unwinder_running);
 	}
 
 	curr_unwinder->dump(task, regs, sp, ops, data);
+}
+
+/*
+ * Trap handler for UNWINDER_BUG() statements. We must switch to the
+ * unwinder with the next highest rating.
+ */
+BUILD_TRAP_HANDLER(unwinder)
+{
+	insn_size_t insn;
+	TRAP_HANDLER_DECL;
+
+	/* Rewind */
+	regs->pc -= instruction_size(ctrl_inw(regs->pc - 4));
+	insn = *(insn_size_t *)instruction_pointer(regs);
+
+	/* Switch unwinders when unwind_stack() is called */
+	unwinder_faulted = 1;
 
-	atomic_dec(&unwinder_running);
+#ifdef CONFIG_BUG
+	handle_BUG(regs);
+#endif
 }
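
For context, a minimal sketch (not part of this patch) of how an unwinder implementation might report an unrecoverable state through the UNWINDER_BUG_ON() helper named in the subject line, instead of BUG_ON(). The example_unwinder name, its rating, its dump body and the early_initcall() registration are illustrative assumptions; only unwinder_register(), the dump() prototype passed by unwind_stack() above, and the trap-handler fallback behaviour come from the patch itself.

/*
 * Illustrative sketch only -- not part of this patch. The example
 * unwinder, its rating and its initcall are assumptions; only
 * unwinder_register(), the dump() signature and UNWINDER_BUG_ON()
 * (named in the subject line) come from this series.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>
#include <asm/ptrace.h>

static void example_unwinder_dump(struct task_struct *task,
				  struct pt_regs *regs,
				  unsigned long *sp,
				  const struct stacktrace_ops *ops,
				  void *data)
{
	/*
	 * Instead of BUG_ON(), raise the unwinder trap: the
	 * BUILD_TRAP_HANDLER(unwinder) handler sets unwinder_faulted,
	 * and the next unwind_stack() call falls back to the
	 * next-highest-rated unwinder.
	 */
	UNWINDER_BUG_ON(!sp);

	/* ... walk the frames and report each address via ops ... */
}

static struct unwinder example_unwinder = {
	.name	= "example-unwinder",
	.dump	= example_unwinder_dump,
	.rating	= 50,	/* arbitrary rating for this sketch */
};

static int __init example_unwinder_init(void)
{
	return unwinder_register(&example_unwinder);
}
early_initcall(example_unwinder_init);

The point of the design is that a faulting unwinder no longer kills the thread; it merely flags itself as unusable, and unwind_stack() quietly drops it from unwinder_list and selects the next-best candidate on the following call.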