author		Joel Fernandes <joelaf@google.com>	2017-10-11 00:51:37 +0200
committer	Steven Rostedt (VMware) <rostedt@goodmis.org>	2017-10-11 00:58:43 +0200
commit		d59158162e032917a428704160a2063a02405ec6 (patch)
tree		8caf4e79a232898372d6010a1a4a6b553c2950e4 /kernel/trace/trace_irqsoff.c
parent		tracing: Prepare to add preempt and irq trace events (diff)
tracing: Add support for preempt and irq enable/disable events
Preempt and irq trace events can be used for tracing the start and end of an atomic section. A trace viewer like systrace can use them to graphically display atomic sections and correlate them with latencies and scheduling issues. This also serves as a prelude to rewriting the preempt and irqsoff tracers on top of synthetic events or probes, along with the numerous other benefits of having trace event features available for these events.

Link: http://lkml.kernel.org/r/20171006005432.14244-3-joelaf@google.com
Link: http://lkml.kernel.org/r/20171010225137.17370-1-joelaf@google.com

Cc: Peter Zijlstra <peterz@infradead.org>
Cc: kernel-team@android.com
Signed-off-by: Joel Fernandes <joelaf@google.com>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
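As a consumer-side illustration (not part of this patch): once the events exist, built-in code, or a module if the tracepoints are exported, can attach a probe through the register_trace_<event>() stubs that TRACE_EVENT() generates. Below is a minimal sketch, assuming the irq_disable event from <trace/events/preemptirq.h> with its TP_PROTO(unsigned long ip, unsigned long parent_ip) prototype and a kernel built with CONFIG_PREEMPTIRQ_EVENTS; the module and counter names are hypothetical.

/*
 * Sketch only: count irq-off sections by hooking the new irq_disable
 * event. The event name and prototype come from
 * <trace/events/preemptirq.h>; the module itself is hypothetical.
 */
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/tracepoint.h>
#include <trace/events/preemptirq.h>

static atomic_long_t irqsoff_count;

/* Probe signature: void *data first, then the TP_PROTO arguments */
static void probe_irq_disable(void *data, unsigned long ip,
			      unsigned long parent_ip)
{
	atomic_long_inc(&irqsoff_count);
}

static int __init irqsoff_count_init(void)
{
	/* register_trace_irq_disable() is generated by TRACE_EVENT() */
	return register_trace_irq_disable(probe_irq_disable, NULL);
}

static void __exit irqsoff_count_exit(void)
{
	unregister_trace_irq_disable(probe_irq_disable, NULL);
	tracepoint_synchronize_unregister();
	pr_info("irqsoff_count: %ld irq-off sections seen\n",
		atomic_long_read(&irqsoff_count));
}

module_init(irqsoff_count_init);
module_exit(irqsoff_count_exit);
MODULE_LICENSE("GPL");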
Diffstat (limited to 'kernel/trace/trace_irqsoff.c')
-rw-r--r--	kernel/trace/trace_irqsoff.c	35
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 0e3033c00474..03ecb4465ee4 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -16,6 +16,9 @@
 
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array *irqsoff_trace __read_mostly;
 static int tracer_enabled __read_mostly;
@@ -777,26 +780,53 @@ static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+/* Per-cpu variable to prevent redundant calls when IRQs already off */
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+
 void trace_hardirqs_on(void)
 {
+	if (!this_cpu_read(tracing_irq_cpu))
+		return;
+
+	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 	tracer_hardirqs_on();
+
+	this_cpu_write(tracing_irq_cpu, 0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
+	if (this_cpu_read(tracing_irq_cpu))
+		return;
+
+	this_cpu_write(tracing_irq_cpu, 1);
+
+	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 	tracer_hardirqs_off();
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
+	if (!this_cpu_read(tracing_irq_cpu))
+		return;
+
+	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
 	tracer_hardirqs_on_caller(caller_addr);
+
+	this_cpu_write(tracing_irq_cpu, 0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
+	if (this_cpu_read(tracing_irq_cpu))
+		return;
+
+	this_cpu_write(tracing_irq_cpu, 1);
+
+	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
 	tracer_hardirqs_off_caller(caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -818,14 +848,17 @@ inline void print_irqtrace_events(struct task_struct *curr)
 }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
+	trace_preempt_enable_rcuidle(a0, a1);
 	tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
+	trace_preempt_disable_rcuidle(a0, a1);
 	tracer_preempt_off(a0, a1);
 }
 #endif
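Usage note, separate from the patch itself: with CONFIG_PREEMPTIRQ_EVENTS enabled, the new events should appear in tracefs under events/preemptirq/ (the TRACE_SYSTEM of the header added earlier in this series), alongside the existing trace events. A small userspace sketch that flips one of them on, assuming tracefs is reachable at the usual /sys/kernel/debug/tracing path:

/* Sketch: enable the irq_disable trace event from userspace.
 * The tracefs path assumes a kernel with CONFIG_PREEMPTIRQ_EVENTS
 * and debugfs mounted at /sys/kernel/debug.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/tracing/events/preemptirq/irq_disable/enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1", f);	/* writing "0" disables the event again */
	fclose(f);
	return 0;
}

Once enabled, the events land in the trace ring buffer and can be read back from the trace or trace_pipe files like any other trace event.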