author    | Phil Auld <pauld@redhat.com>          | 2020-06-29 21:23:03 +0200
committer | Peter Zijlstra <peterz@infradead.org> | 2020-07-08 11:39:02 +0200
commit    | 9d246053a69196c7c27068870e9b4b66ac536f68
tree      | 25cd59b9266c05a1bd7aab1792d084860f2965f6
parent    | net: Restrict receive packets queuing to housekeeping CPUs
sched: Add a tracepoint to track rq->nr_running
Add a bare tracepoint, trace_sched_update_nr_running_tp, which tracks the
->nr_running field of a CPU's runqueue (rq). This is used to accurately trace this data and
provide a visualization of scheduler imbalances in, for example, the
form of a heat map. The tracepoint is accessed by loading an external
kernel module. An example module (forked from Qais' module and including
the pelt related tracepoints) can be found at:
https://github.com/auldp/tracepoints-helpers.git
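As a rough, hypothetical sketch (the probe function, its name, and the trace_printk output format below are illustrative and not taken from that repository; the register/unregister helpers and the sched_trace_rq_*() accessors are the interfaces this patch and its predecessors export), an external GPL module attaches to the bare tracepoint like this:

```c
/* Minimal sketch of an external module probing the bare tracepoint. */
#include <linux/module.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

/*
 * Probes for bare tracepoints take a leading void *data argument
 * followed by the TP_PROTO arguments (struct rq *rq, int change).
 */
static void probe_nr_running(void *data, struct rq *rq, int change)
{
	/*
	 * struct rq is private to kernel/sched/sched.h; use the
	 * exported accessors rather than dereferencing it here.
	 */
	trace_printk("cpu=%d nr_running=%d change=%d\n",
		     sched_trace_rq_cpu(rq),
		     sched_trace_rq_nr_running(rq), change);
}

static int __init nr_running_tp_init(void)
{
	return register_trace_sched_update_nr_running_tp(probe_nr_running, NULL);
}

static void __exit nr_running_tp_exit(void)
{
	unregister_trace_sched_update_nr_running_tp(probe_nr_running, NULL);
	/* Wait for in-flight probes to finish before the module unloads. */
	tracepoint_synchronize_unregister();
}

module_init(nr_running_tp_init);
module_exit(nr_running_tp_exit);
MODULE_LICENSE("GPL");
```

Once such a module is loaded, the probe's output lands in the ftrace ring buffer and can be recorded with trace-cmd for post-processing.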
A script to turn the trace-cmd report output into a heatmap plot can be
found at:
https://github.com/jirvoz/plot-nr-running
The tracepoints are added to add_nr_running() and sub_nr_running(), which
live in kernel/sched/sched.h. In order to avoid CREATE_TRACE_POINTS in
the header, a wrapper call is used and the trace/events/sched.h include
is moved before sched.h in kernel/sched/core.c (a condensed sketch of
this arrangement follows below).
Signed-off-by: Phil Auld <pauld@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20200629192303.GC120228@lorien.usersys.redhat.com
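The shape of that wrapper arrangement, condensed into one place for readability (all identifiers are taken from the patch itself; unrelated lines are elided, so this is an excerpt, not a standalone build unit):

```c
/*
 * sched.h is included by every scheduler translation unit, so it must
 * not define CREATE_TRACE_POINTS: that would instantiate the
 * tracepoints once per includer and break the link. Instead, the
 * inline helpers only test the tracepoint's static key and call an
 * out-of-line wrapper defined where the tracepoints are created.
 */

/* kernel/sched/core.c -- the one place the tracepoints are created */
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
#undef CREATE_TRACE_POINTS

void call_trace_sched_update_nr_running(struct rq *rq, int count)
{
	trace_sched_update_nr_running_tp(rq, count);
}

/* kernel/sched/sched.h -- the hot path pays only a patched-out branch
 * while the tracepoint is disabled */
static inline void add_nr_running(struct rq *rq, unsigned count)
{
	/* ... */
	if (trace_sched_update_nr_running_tp_enabled())
		call_trace_sched_update_nr_running(rq, count);
	/* ... */
}
```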
include/linux/sched.h        |  1
include/trace/events/sched.h |  4
kernel/sched/core.c          | 13
kernel/sched/fair.c          |  8
kernel/sched/pelt.c          |  2
kernel/sched/sched.h         | 10
6 files changed, 30 insertions(+), 8 deletions(-)
```diff
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 683372943093..12b10ce51a08 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2044,6 +2044,7 @@ const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
 
 int sched_trace_rq_cpu(struct rq *rq);
+int sched_trace_rq_nr_running(struct rq *rq);
 
 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
 
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 04f9a4c7b0d9..0d5ff0958d48 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -642,6 +642,10 @@ DECLARE_TRACE(sched_util_est_se_tp,
 	TP_PROTO(struct sched_entity *se),
 	TP_ARGS(se));
 
+DECLARE_TRACE(sched_update_nr_running_tp,
+	TP_PROTO(struct rq *rq, int change),
+	TP_ARGS(rq, change));
+
 #endif /* _TRACE_SCHED_H */
 
 /* This part must be outside protection */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4cf30e4de653..ff0519551188 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6,6 +6,10 @@
  *
  *  Copyright (C) 1991-2002  Linus Torvalds
  */
+#define CREATE_TRACE_POINTS
+#include <trace/events/sched.h>
+#undef CREATE_TRACE_POINTS
+
 #include "sched.h"
 
 #include <linux/nospec.h>
@@ -23,9 +27,6 @@
 #include "pelt.h"
 #include "smp.h"
 
-#define CREATE_TRACE_POINTS
-#include <trace/events/sched.h>
-
 /*
  * Export tracepoints that act as a bare tracehook (ie: have no trace event
  * associated with them) to allow external modules to probe them.
@@ -38,6 +39,7 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(pelt_se_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
 EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
 
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -8195,4 +8197,7 @@ const u32 sched_prio_to_wmult[40] = {
  /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
 
-#undef CREATE_TRACE_POINTS
+void call_trace_sched_update_nr_running(struct rq *rq, int count)
+{
+	trace_sched_update_nr_running_tp(rq, count);
+}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6fab1d17c575..3213cb247aff 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -22,8 +22,6 @@
  */
 #include "sched.h"
 
-#include <trace/events/sched.h>
-
 /*
  * Targeted preemption latency for CPU-bound tasks:
  *
@@ -11296,3 +11294,9 @@ const struct cpumask *sched_trace_rd_span(struct root_domain *rd)
 #endif
 }
 EXPORT_SYMBOL_GPL(sched_trace_rd_span);
+
+int sched_trace_rq_nr_running(struct rq *rq)
+{
+	return rq ? rq->nr_running : -1;
+}
+EXPORT_SYMBOL_GPL(sched_trace_rq_nr_running);
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 11bea3b08115..2c613e1cff3a 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -28,8 +28,6 @@
 #include "sched.h"
 #include "pelt.h"
 
-#include <trace/events/sched.h>
-
 /*
  * Approximate:
  *   val * y^n, where y^32 ~= 0.5 (~1 scheduling period)
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b1432f608061..65b72e0487bf 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -76,6 +76,8 @@
 #include "cpupri.h"
 #include "cpudeadline.h"
 
+#include <trace/events/sched.h>
+
 #ifdef CONFIG_SCHED_DEBUG
 # define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
 #else
@@ -97,6 +99,7 @@ extern atomic_long_t calc_load_tasks;
 
 extern void calc_global_load_tick(struct rq *this_rq);
 extern long calc_load_fold_active(struct rq *this_rq, long adjust);
+extern void call_trace_sched_update_nr_running(struct rq *rq, int count);
 
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
@@ -1973,6 +1976,9 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 	unsigned prev_nr = rq->nr_running;
 
 	rq->nr_running = prev_nr + count;
+	if (trace_sched_update_nr_running_tp_enabled()) {
+		call_trace_sched_update_nr_running(rq, count);
+	}
 
 #ifdef CONFIG_SMP
 	if (prev_nr < 2 && rq->nr_running >= 2) {
@@ -1987,6 +1993,10 @@ static inline void add_nr_running(struct rq *rq, unsigned count)
 static inline void sub_nr_running(struct rq *rq, unsigned count)
 {
 	rq->nr_running -= count;
+	if (trace_sched_update_nr_running_tp_enabled()) {
+		call_trace_sched_update_nr_running(rq, count);
+	}
+
 	/* Check if we still need preemption */
 	sched_update_tick_dependency(rq);
 }
```