author | Shaohua Li <shaohua.li@intel.com> | 2007-11-13 07:55:20 +0100
committer | Tony Luck <tony.luck@intel.com> | 2007-12-08 01:12:50 +0100
commit | 3661999a17b0397c7a93c200b280c55958ba3593 (patch)
tree | 896f2b9a6580423cc7629dce446e7483eaba313c /arch
parent | [IA64] operator priority fix in acpi_map_lsapic() (diff)
download | linux-3661999a17b0397c7a93c200b280c55958ba3593.tar.xz linux-3661999a17b0397c7a93c200b280c55958ba3593.zip
[IA64] kprobe: make kreturn probe handler stack unwind correct
Restore regs->cr_iip before the kreturn probe handler runs. This way, if the
probe handler does an unwind, it can correctly obtain the stack trace.
Fixes: http://sourceware.org/bugzilla/show_bug.cgi?id=5051
Signed-off-by: Shaohua Li <shaohua.li@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
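As an illustration of why the ordering matters (not part of this patch), below is a minimal sketch of a kretprobe module whose return handler dumps the stack. The probed symbol "do_fork", the maxactive value, and the module boilerplate are illustrative assumptions; the point is only that a handler which unwinds relies on regs->cr_iip already holding the real return address rather than the kretprobe trampoline.

```c
/*
 * Minimal kretprobe sketch (illustrative, not from this patch).
 * The return handler calls dump_stack(); with regs->cr_iip restored
 * before handlers run, the unwinder can walk past the trampoline to
 * the probed function's real caller.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* Unwind from inside the handler; depends on a consistent cr_iip. */
	dump_stack();
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= ret_handler,
	.kp.symbol_name	= "do_fork",	/* hypothetical probe target */
	.maxactive	= 16,		/* illustrative instance count */
};

static int __init rp_example_init(void)
{
	return register_kretprobe(&my_kretprobe);
}

static void __exit rp_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(rp_example_init);
module_exit(rp_example_exit);
MODULE_LICENSE("GPL");
```

Before this patch, such a handler would unwind while regs->cr_iip still pointed at kretprobe_trampoline, producing a misleading trace; the patch restores the real return address first, as the diff below shows.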
Diffstat (limited to 'arch')
-rw-r--r-- | arch/ia64/kernel/kprobes.c | 19 |
1 file changed, 17 insertions(+), 2 deletions(-)
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 90518e43aba0..fc4d2676264f 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -435,6 +435,23 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 			/* another task is sharing our hash bucket */
 			continue;
 
+		orig_ret_address = (unsigned long)ri->ret_addr;
+		if (orig_ret_address != trampoline_address)
+			/*
+			 * This is the real return address. Any other
+			 * instances associated with this task are for
+			 * other calls deeper on the call stack
+			 */
+			break;
+	}
+
+	regs->cr_iip = orig_ret_address;
+
+	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+		if (ri->task != current)
+			/* another task is sharing our hash bucket */
+			continue;
+
 		if (ri->rp && ri->rp->handler)
 			ri->rp->handler(ri, regs);
 
@@ -452,8 +469,6 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
 
 	kretprobe_assert(ri, orig_ret_address, trampoline_address);
 
-	regs->cr_iip = orig_ret_address;
-
 	reset_current_kprobe();
 	spin_unlock_irqrestore(&kretprobe_lock, flags);
 	preempt_enable_no_resched();