summary | refs | log | tree | commit | diff | stats
path: root/arch/powerpc
diff options
context:
space:
mode:
author: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> 2017-10-23 18:37:39 +0200
committer: Michael Ellerman <mpe@ellerman.id.au> 2017-11-12 13:51:41 +0100
commit: f72180cc93a2c64d4efe0129fa2396ad78be80e3 (patch)
tree: 5bcb7d2e763cbd45bcedf15f99bf44ecdf0e1a7d /arch/powerpc
parent: powerpc/kprobes: Disable preemption before invoking probe handler for optprobes (diff)
download: linux-f72180cc93a2c64d4efe0129fa2396ad78be80e3.tar.xz
download: linux-f72180cc93a2c64d4efe0129fa2396ad78be80e3.zip
powerpc/kprobes: Do not disable interrupts for optprobes and kprobes_on_ftrace
Per Documentation/kprobes.txt, we don't necessarily need to disable interrupts before invoking the kprobe handlers. Masami submitted similar changes for x86 via commit a19b2e3d783964 ("kprobes/x86: Remove IRQ disabling from ftrace-based/optimized kprobes"). Do the same for powerpc. Signed-off-by: Naveen N. Rao <naveen.n.rao@linux.vnet.ibm.com> Acked-by: Masami Hiramatsu <mhiramat@kernel.org> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc')
-rw-r--r-- arch/powerpc/kernel/kprobes-ftrace.c | 10
-rw-r--r-- arch/powerpc/kernel/optprobes.c      | 10
2 files changed, 2 insertions(+), 18 deletions(-)
diff --git a/arch/powerpc/kernel/kprobes-ftrace.c b/arch/powerpc/kernel/kprobes-ftrace.c
index 4b1f34f685b1..7a1f99f1b47f 100644
--- a/arch/powerpc/kernel/kprobes-ftrace.c
+++ b/arch/powerpc/kernel/kprobes-ftrace.c
@@ -75,11 +75,7 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
- unsigned long flags;
- /* Disable irq for emulating a breakpoint and avoiding preempt */
- local_irq_save(flags);
- hard_irq_disable();
preempt_disable();
p = get_kprobe((kprobe_opcode_t *)nip);
@@ -105,16 +101,14 @@ void kprobe_ftrace_handler(unsigned long nip, unsigned long parent_nip,
else {
/*
* If pre_handler returns !0, it sets regs->nip and
- * resets current kprobe. In this case, we still need
- * to restore irq, but not preemption.
+ * resets current kprobe. In this case, we should not
+ * re-enable preemption.
*/
- local_irq_restore(flags);
return;
}
}
end:
preempt_enable_no_resched();
- local_irq_restore(flags);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);
diff --git a/arch/powerpc/kernel/optprobes.c b/arch/powerpc/kernel/optprobes.c
index 60ba7f1370a8..8237884ca389 100644
--- a/arch/powerpc/kernel/optprobes.c
+++ b/arch/powerpc/kernel/optprobes.c
@@ -115,14 +115,10 @@ static unsigned long can_optimize(struct kprobe *p)
static void optimized_callback(struct optimized_kprobe *op,
struct pt_regs *regs)
{
- unsigned long flags;
-
/* This is possible if op is under delayed unoptimizing */
if (kprobe_disabled(&op->kp))
return;
- local_irq_save(flags);
- hard_irq_disable();
preempt_disable();
if (kprobe_running()) {
@@ -135,13 +131,7 @@ static void optimized_callback(struct optimized_kprobe *op,
__this_cpu_write(current_kprobe, NULL);
}
- /*
- * No need for an explicit __hard_irq_enable() here.
- * local_irq_restore() will re-enable interrupts,
- * if they were hard disabled.
- */
preempt_enable_no_resched();
- local_irq_restore(flags);
}
NOKPROBE_SYMBOL(optimized_callback);