-rw-r--r--  kernel/trace/trace.h            | 15
-rw-r--r--  kernel/trace/trace_irqsoff.c    | 48
-rw-r--r--  kernel/trace/trace_preemptirq.c | 25
3 files changed, 38 insertions, 50 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d88cd9bb72f4..a62b678731e3 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -1827,6 +1827,21 @@ static inline int tracing_alloc_snapshot_instance(struct trace_array *tr)
 }
 #endif
 
+#ifdef CONFIG_PREEMPT_TRACER
+void tracer_preempt_on(unsigned long a0, unsigned long a1);
+void tracer_preempt_off(unsigned long a0, unsigned long a1);
+#else
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+#ifdef CONFIG_IRQSOFF_TRACER
+void tracer_hardirqs_on(unsigned long a0, unsigned long a1);
+void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
+#else
+static inline void tracer_hardirqs_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
+#endif
+
 extern struct trace_iterator *tracepoint_print_iter;
 
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 4af990e9c594..94c1ba139b3b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -605,40 +605,18 @@ static void irqsoff_tracer_stop(struct trace_array *tr)
 /*
  * We are only interested in hardirq on/off events:
  */
-static void tracer_hardirqs_on(void *none, unsigned long a0, unsigned long a1)
+void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
 {
         unsigned int pc = preempt_count();
 
-        /*
-         * Tracepoint probes are expected to be called with preempt disabled,
-         * We don't care about being called with preempt disabled but we need
-         * to know in the future if that changes so we can remove the next
-         * preempt_enable.
-         */
-        WARN_ON_ONCE(pc < PREEMPT_DISABLE_OFFSET);
-
-        /* Use PREEMPT_DISABLE_OFFSET to handle !CONFIG_PREEMPT cases */
-        pc -= PREEMPT_DISABLE_OFFSET;
-
         if (!preempt_trace(pc) && irq_trace())
                 stop_critical_timing(a0, a1, pc);
 }
 
-static void tracer_hardirqs_off(void *none, unsigned long a0, unsigned long a1)
+void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
 {
         unsigned int pc = preempt_count();
 
-        /*
-         * Tracepoint probes are expected to be called with preempt disabled,
-         * We don't care about being called with preempt disabled but we need
-         * to know in the future if that changes so we can remove the next
-         * preempt_enable.
-         */
-        WARN_ON_ONCE(pc < PREEMPT_DISABLE_OFFSET);
-
-        /* Use PREEMPT_DISABLE_OFFSET to handle !CONFIG_PREEMPT cases */
-        pc -= PREEMPT_DISABLE_OFFSET;
-
         if (!preempt_trace(pc) && irq_trace())
                 start_critical_timing(a0, a1, pc);
 }
@@ -647,15 +625,11 @@ static int irqsoff_tracer_init(struct trace_array *tr)
 {
         trace_type = TRACER_IRQS_OFF;
 
-        register_trace_irq_disable(tracer_hardirqs_off, NULL);
-        register_trace_irq_enable(tracer_hardirqs_on, NULL);
         return __irqsoff_tracer_init(tr);
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
-        unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
-        unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
-
         __irqsoff_tracer_reset(tr);
 }
 
@@ -681,7 +655,7 @@ static struct tracer irqsoff_tracer __read_mostly =
 #endif /* CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
-static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
+void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
         int pc = preempt_count();
 
@@ -689,7 +663,7 @@ static void tracer_preempt_on(void *none, unsigned long a0, unsigned long a1)
                 stop_critical_timing(a0, a1, pc);
 }
 
-static void tracer_preempt_off(void *none, unsigned long a0, unsigned long a1)
+void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
         int pc = preempt_count();
 
@@ -701,15 +675,11 @@ static int preemptoff_tracer_init(struct trace_array *tr)
 {
         trace_type = TRACER_PREEMPT_OFF;
 
-        register_trace_preempt_disable(tracer_preempt_off, NULL);
-        register_trace_preempt_enable(tracer_preempt_on, NULL);
         return __irqsoff_tracer_init(tr);
 }
 
 static void preemptoff_tracer_reset(struct trace_array *tr)
 {
-        unregister_trace_preempt_disable(tracer_preempt_off, NULL);
-        unregister_trace_preempt_enable(tracer_preempt_on, NULL);
-
         __irqsoff_tracer_reset(tr);
 }
 
@@ -740,21 +710,11 @@ static int preemptirqsoff_tracer_init(struct trace_array *tr)
 {
         trace_type = TRACER_IRQS_OFF | TRACER_PREEMPT_OFF;
 
-        register_trace_irq_disable(tracer_hardirqs_off, NULL);
-        register_trace_irq_enable(tracer_hardirqs_on, NULL);
-        register_trace_preempt_disable(tracer_preempt_off, NULL);
-        register_trace_preempt_enable(tracer_preempt_on, NULL);
-
         return __irqsoff_tracer_init(tr);
 }
 
 static void preemptirqsoff_tracer_reset(struct trace_array *tr)
 {
-        unregister_trace_irq_disable(tracer_hardirqs_off, NULL);
-        unregister_trace_irq_enable(tracer_hardirqs_on, NULL);
-        unregister_trace_preempt_disable(tracer_preempt_off, NULL);
-        unregister_trace_preempt_enable(tracer_preempt_on, NULL);
-
         __irqsoff_tracer_reset(tr);
 }
 
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index fa656b25f427..71f553cceb3c 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -9,6 +9,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include "trace.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/preemptirq.h>
@@ -20,7 +21,9 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
 void trace_hardirqs_on(void)
 {
         if (this_cpu_read(tracing_irq_cpu)) {
-                trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+                if (!in_nmi())
+                        trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+                tracer_hardirqs_on(CALLER_ADDR0, CALLER_ADDR1);
                 this_cpu_write(tracing_irq_cpu, 0);
         }
 
@@ -32,7 +35,9 @@ void trace_hardirqs_off(void)
 {
         if (!this_cpu_read(tracing_irq_cpu)) {
                 this_cpu_write(tracing_irq_cpu, 1);
-                trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+                tracer_hardirqs_off(CALLER_ADDR0, CALLER_ADDR1);
+                if (!in_nmi())
+                        trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
         }
 
         lockdep_hardirqs_off(CALLER_ADDR0);
@@ -42,7 +47,9 @@ EXPORT_SYMBOL(trace_hardirqs_off);
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
         if (this_cpu_read(tracing_irq_cpu)) {
-                trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+                if (!in_nmi())
+                        trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+                tracer_hardirqs_on(CALLER_ADDR0, caller_addr);
                 this_cpu_write(tracing_irq_cpu, 0);
         }
 
@@ -54,7 +61,9 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
         if (!this_cpu_read(tracing_irq_cpu)) {
                 this_cpu_write(tracing_irq_cpu, 1);
-                trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+                tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
+                if (!in_nmi())
+                        trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
         }
 
         lockdep_hardirqs_off(CALLER_ADDR0);
@@ -66,11 +75,15 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
-        trace_preempt_enable_rcuidle(a0, a1);
+        if (!in_nmi())
+                trace_preempt_enable_rcuidle(a0, a1);
+        tracer_preempt_on(a0, a1);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
-        trace_preempt_disable_rcuidle(a0, a1);
+        if (!in_nmi())
+                trace_preempt_disable_rcuidle(a0, a1);
+        tracer_preempt_off(a0, a1);
 }
 #endif
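The overall shape of the change: instead of registering tracer_hardirqs_on/off() and tracer_preempt_on/off() as tracepoint probes at tracer-init time, trace_preemptirq.c now calls them directly, with trace.h supplying static inline no-op stubs when the corresponding tracer is not configured, and the *_rcuidle tracepoints are skipped in NMI context. The following is a minimal userspace sketch of that pattern, not kernel code: MOCK_IRQSOFF_TRACER, mock_in_nmi, mock_irq_disable_tracepoint and mock_trace_hardirqs_off are made-up stand-ins for CONFIG_IRQSOFF_TRACER, in_nmi(), trace_irq_disable_rcuidle() and trace_hardirqs_off(), and the per-CPU tracing_irq_cpu bookkeeping and lockdep calls are omitted.

/* Illustrative mock of the direct-call pattern above; builds with any C compiler. */
#include <stdbool.h>
#include <stdio.h>

/*
 * Build-time selection, mirroring the #ifdef block added to trace.h:
 * with the tracer enabled the real hook is declared, otherwise a
 * static inline stub lets the call site compile away to nothing.
 */
#define MOCK_IRQSOFF_TRACER 1

#if MOCK_IRQSOFF_TRACER
void tracer_hardirqs_off(unsigned long a0, unsigned long a1);
#else
static inline void tracer_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

static bool mock_in_nmi;        /* stand-in for in_nmi() */

/* Stand-in for the irq_disable tracepoint (trace_irq_disable_rcuidle). */
static void mock_irq_disable_tracepoint(unsigned long a0, unsigned long a1)
{
        printf("tracepoint: irq_disable caller=%#lx parent=%#lx\n", a0, a1);
}

/*
 * Mirrors the reworked trace_hardirqs_off(): the latency-tracer hook is
 * called directly, while the tracepoint is skipped in NMI context.
 */
static void mock_trace_hardirqs_off(unsigned long a0, unsigned long a1)
{
        tracer_hardirqs_off(a0, a1);
        if (!mock_in_nmi)
                mock_irq_disable_tracepoint(a0, a1);
}

#if MOCK_IRQSOFF_TRACER
/* The tracer side, standing in for tracer_hardirqs_off() in trace_irqsoff.c. */
void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
{
        printf("irqsoff tracer: start timing, caller=%#lx parent=%#lx\n",
               a0, a1);
}
#endif

int main(void)
{
        mock_trace_hardirqs_off(0x1000, 0x2000);        /* normal context */

        mock_in_nmi = true;                             /* "NMI" context */
        mock_trace_hardirqs_off(0x1000, 0x2000);
        return 0;
}

With the mock option set to 0, only the inline stub remains and the direct call disappears at compile time, mirroring how the !CONFIG_IRQSOFF_TRACER / !CONFIG_PREEMPT_TRACER stubs in trace.h keep the new direct calls free when the tracers are not built in.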