Diffstat (limited to 'kernel')
-rw-r--r--   kernel/trace/Kconfig                 |   62
-rw-r--r--   kernel/trace/Makefile                |    1
-rw-r--r--   kernel/trace/bpf_trace.c             |    3
-rw-r--r--   kernel/trace/ring_buffer.c           |    2
-rw-r--r--   kernel/trace/trace.c                 |  265
-rw-r--r--   kernel/trace/trace.h                 |   35
-rw-r--r--   kernel/trace/trace_boot.c            |   27
-rw-r--r--   kernel/trace/trace_entries.h         |   41
-rw-r--r--   kernel/trace/trace_events_hist.c     |    3
-rw-r--r--   kernel/trace/trace_events_trigger.c  |    3
-rw-r--r--   kernel/trace/trace_hwlat.c           |  534
-rw-r--r--   kernel/trace/trace_osnoise.c         | 2059
-rw-r--r--   kernel/trace/trace_output.c          |  119
-rw-r--r--   kernel/trace/trace_sched_wakeup.c    |   24
-rw-r--r--   kernel/tracepoint.c                  |   33
15 files changed, 2987 insertions, 224 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 7fa82778c3e6..d567b1717c4c 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -356,6 +356,68 @@ config HWLAT_TRACER
file. Every time a latency is greater than tracing_thresh, it will
be recorded into the ring buffer.
+config OSNOISE_TRACER
+ bool "OS Noise tracer"
+ select GENERIC_TRACER
+ help
+ In the context of high-performance computing (HPC), the Operating
+ System Noise (osnoise) refers to the interference experienced by an
+ application due to activities inside the operating system. In the
+ context of Linux, NMIs, IRQs, SoftIRQs, and any other system thread
+ can cause noise to the system. Moreover, hardware-related jobs can
+ also cause noise, for example, via SMIs.
+
+ The osnoise tracer leverages the hwlat_detector by running a similar
+ loop with preemption, SoftIRQs and IRQs enabled, thus allowing all
+ the sources of osnoise to take place during its execution. The
+ osnoise tracer takes note of the entry and exit points of any source
+ of interference, increasing a per-cpu interference counter. It saves
+ an interference counter for each source of interference. The
+ interference counter for NMIs, IRQs, SoftIRQs, and threads is
+ increased any time the tool observes the entry event of one of these
+ interferences. When noise happens without any interference from the
+ operating system level, the hardware noise counter increases,
+ pointing to a hardware-related noise. In this way, osnoise can
+ account for any source of interference. At the end of the period,
+ the osnoise tracer prints the sum of all noise, the max single
+ noise, the percentage of CPU available for the thread, and the
+ counters for the noise sources.
+
+ In addition to the tracer, a set of tracepoints was added to
+ facilitate the identification of the osnoise source.
+
+ The output will appear in the trace and trace_pipe files.
+
+ To enable this tracer, echo "osnoise" into the current_tracer
+ file.
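
A minimal usage sketch for the above (not part of the patch; it assumes tracefs is mounted at its usual location, /sys/kernel/tracing):

    # select the osnoise tracer and watch its per-period summary lines
    echo osnoise > /sys/kernel/tracing/current_tracer
    cat /sys/kernel/tracing/trace_pipe

The summary described in the help text (sum of noise, max single noise, percentage of CPU available, and the interference counters) shows up as one line per period in the trace and trace_pipe files.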
+
+config TIMERLAT_TRACER
+ bool "Timerlat tracer"
+ select OSNOISE_TRACER
+ select GENERIC_TRACER
+ help
+ The timerlat tracer aims to help developers of the preemptive kernel
+ find sources of wakeup latencies of real-time threads.
+
+ The tracer creates a per-cpu kernel thread with real-time priority.
+ The tracer thread sets a periodic timer to wake itself up, and goes
+ to sleep waiting for the timer to fire. At the wakeup, the thread
+ then computes a wakeup latency value as the difference between
+ the current time and the absolute time that the timer was set
+ to expire.
+
+ The tracer prints two lines at every activation. The first is the
+ timer latency observed at the hardirq context before the
+ activation of the thread. The second is the timer latency observed
+ by the thread, which is the same level that cyclictest reports. The
+ ACTIVATION ID field serves to relate the irq execution to its
+ respective thread execution.
+
+ The tracer is built on top of the osnoise tracer, and the osnoise:
+ events can be used to trace the source of interference from NMI,
+ IRQs and other threads. It also enables the capture of the
+ stacktrace at the IRQ context, which helps to identify the code
+ path that can cause thread delay.
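
A similar sketch for timerlat (not part of the patch; the tracer name "timerlat" and the tracefs mount point are assumptions consistent with the osnoise instructions above):

    # select the timerlat tracer; each activation prints an irq line and a thread line
    echo timerlat > /sys/kernel/tracing/current_tracer
    cat /sys/kernel/tracing/trace

The two lines of one activation share the same ACTIVATION ID, which is how the irq-context latency is related to its thread-context latency.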
+
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on HAVE_MMIOTRACE_SUPPORT && PCI
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index b28d3e5013cd..b1c47ccf4f73 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -58,6 +58,7 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
+obj-$(CONFIG_OSNOISE_TRACER) += trace_osnoise.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 64bd2d84367f..b4916ef388ad 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1842,7 +1842,8 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
if (prog->aux->max_tp_access > btp->writable_size)
return -EINVAL;
- return tracepoint_probe_register(tp, (void *)btp->bpf_func, prog);
+ return tracepoint_probe_register_may_exist(tp, (void *)btp->bpf_func,
+ prog);
}
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2c0ee6484990..d1463eac11a3 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3391,7 +3391,7 @@ static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer,
case RINGBUF_TYPE_PADDING:
if (event->time_delta == 1)
break;
- /* fall through */
+ fallthrough;
case RINGBUF_TYPE_DATA:
ts += event->time_delta;
break;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 3c1384bc5c5a..f8b80b5bab71 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -87,6 +87,7 @@ void __init disable_tracing_selftest(const char *reason)
/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
+static bool tracepoint_printk_stop_on_boot __initdata;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
@@ -197,12 +198,12 @@ __setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
- if (*str++ != '=' || !*str) {
+ if (*str++ != '=' || !*str || !strcmp("1", str)) {
ftrace_dump_on_oops = DUMP_ALL;
return 1;
}
- if (!strcmp("orig_cpu", str)) {
+ if (!strcmp("orig_cpu", str) || !strcmp("2", str)) {
ftrace_dump_on_oops = DUMP_ORIG;
return 1;
}
@@ -257,6 +258,13 @@ static int __init set_tracepoint_printk(char *str)
}
__setup("tp_printk", set_tracepoint_printk);
+static int __init set_tracepoint_printk_stop(char *str)
+{
+ tracepoint_printk_stop_on_boot = true;
+ return 1;
+}
+__setup("tp_printk_stop_on_boot", set_tracepoint_printk_stop);
+
unsigned long long ns2usecs(u64 nsec)
{
nsec += 500;
@@ -1683,8 +1691,7 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
unsigned long __read_mostly tracing_thresh;
static const struct file_operations tracing_max_lat_fops;
-#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- defined(CONFIG_FSNOTIFY)
+#ifdef LATENCY_FS_NOTIFY
static struct workqueue_struct *fsnotify_wq;
@@ -2185,8 +2192,15 @@ void tracing_reset_all_online_cpus(void)
}
}
+/*
+ * The tgid_map array maps from pid to tgid; i.e. the value stored at index i
+ * is the tgid last observed corresponding to pid=i.
+ */
static int *tgid_map;
+/* The maximum valid index into tgid_map. */
+static size_t tgid_map_max;
+
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
@@ -2459,24 +2473,41 @@ void trace_find_cmdline(int pid, char comm[])
preempt_enable();
}
+static int *trace_find_tgid_ptr(int pid)
+{
+ /*
+ * Pairs with the smp_store_release in set_tracer_flag() to ensure that
+ * if we observe a non-NULL tgid_map then we also observe the correct
+ * tgid_map_max.
+ */
+ int *map = smp_load_acquire(&tgid_map);
+
+ if (unlikely(!map || pid > tgid_map_max))
+ return NULL;
+
+ return &map[pid];
+}
+
int trace_find_tgid(int pid)
{
- if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
- return 0;
+ int *ptr = trace_find_tgid_ptr(pid);
- return tgid_map[pid];
+ return ptr ? *ptr : 0;
}
static int trace_save_tgid(struct task_struct *tsk)
{
+ int *ptr;
+
/* treat recording of idle task as a success */
if (!tsk->pid)
return 1;
- if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
+ ptr = trace_find_tgid_ptr(tsk->pid);
+ if (!ptr)
return 0;
- tgid_map[tsk->pid] = tsk->tgid;
+ *ptr = tsk->tgid;
return 1;
}
@@ -2730,9 +2761,45 @@ trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
if (!tr->no_filter_buffering_ref &&
(trace_file->flags & (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
(entry = this_cpu_read(trace_buffered_event))) {
- /* Try to use the per cpu buffer first */
+ /*
+ * Filtering is on, so try to use the per cpu buffer first.
+ * This buffer will simulate a ring_buffer_event,
+ * where the type_len is zero and the array[0] will
+ * hold the full length.
+ * (see include/linux/ring-buffer.h for details on
+ * how the ring_buffer_event is structured).
+ *
+ * Using a temp buffer during filtering and copying it
+ * on a matched filter is quicker than writing directly
+ * into the ring buffer and then discarding it when
+ * it doesn't match. That is because the discard
+ * requires several atomic operations to get right.
+ * Copying on match and doing nothing on a failed match
+ * is still quicker than no copy on match, but having
+ * to discard out of the ring buffer on a failed match.
+ */
+ int max_len = PAGE_SIZE - struct_size(entry, array, 1);
+
val = this_cpu_inc_return(trace_buffered_event_cnt);
- if ((len < (PAGE_SIZE - sizeof(*entry) - sizeof(entry->array[0]))) && val == 1) {
+
+ /*
+ * Preemption is disabled, but interrupts and NMIs
+ * can still come in now. If that happens after
+ * the above increment, then it will have to go
+ * back to the old method of allocating the event
+ * on the ring buffer, and if the filter fails, it
+ * will have to call ring_buffer_discard_commit()
+ * to remove it.
+ *
+ * Need to also check the unlikely case that the
+ * length is bigger than the temp buffer size.
+ * If that happens, then the reserve is pretty much
+ * guaranteed to fail, as the ring buffer currently
+ * only allows events less than a page. But that may
+ * change in the future, so let the ring buffer reserve
+ * handle the failure in that case.
+ */
+ if (val == 1 && likely(len <= max_len)) {
trace_event_setup(entry, type, trace_ctx);
entry->array[0] = len;
return entry;
@@ -5172,6 +5239,8 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
{
+ int *map;
+
if ((mask == TRACE_ITER_RECORD_TGID) ||
(mask == TRACE_ITER_RECORD_CMD))
lockdep_assert_held(&event_mutex);
@@ -5194,10 +5263,19 @@ int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
trace_event_enable_cmd_record(enabled);
if (mask == TRACE_ITER_RECORD_TGID) {
- if (!tgid_map)
- tgid_map = kvcalloc(PID_MAX_DEFAULT + 1,
- sizeof(*tgid_map),
- GFP_KERNEL);
+ if (!tgid_map) {
+ tgid_map_max = pid_max;
+ map = kvcalloc(tgid_map_max + 1, sizeof(*tgid_map),
+ GFP_KERNEL);
+
+ /*
+ * Pairs with smp_load_acquire() in
+ * trace_find_tgid_ptr() to ensure that if it observes
+ * the tgid_map we just allocated then it also observes
+ * the corresponding tgid_map_max value.
+ */
+ smp_store_release(&tgid_map, map);
+ }
if (!tgid_map) {
tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
return -ENOMEM;
@@ -5609,37 +5687,16 @@ static const struct file_operations tracing_readme_fops = {
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
- int *ptr = v;
-
- if (*pos || m->count)
- ptr++;
-
- (*pos)++;
-
- for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
- if (trace_find_tgid(*ptr))
- return ptr;
- }
+ int pid = ++(*pos);
- return NULL;
+ return trace_find_tgid_ptr(pid);
}
static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
- void *v;
- loff_t l = 0;
-
- if (!tgid_map)
- return NULL;
+ int pid = *pos;
- v = &tgid_map[0];
- while (l <= *pos) {
- v = saved_tgids_next(m, v, &l);
- if (!v)
- return NULL;
- }
-
- return v;
+ return trace_find_tgid_ptr(pid);
}
static void saved_tgids_stop(struct seq_file *m, void *v)
@@ -5648,9 +5705,14 @@ static void saved_tgids_stop(struct seq_file *m, void *v)
static int saved_tgids_show(struct seq_file *m, void *v)
{
- int pid = (int *)v - tgid_map;
+ int *entry = (int *)v;
+ int pid = entry - tgid_map;
+ int tgid = *entry;
- seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
+ if (tgid == 0)
+ return SEQ_SKIP;
+
+ seq_printf(m, "%d %d\n", pid, tgid);
return 0;
}
@@ -6135,7 +6197,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
unsigned long size, int cpu_id)
{
- int ret = size;
+ int ret;
mutex_lock(&trace_types_lock);
@@ -7529,6 +7591,91 @@ static const struct file_operations snapshot_raw_fops = {
#endif /* CONFIG_TRACER_SNAPSHOT */
+/*
+ * trace_min_max_write - Write a u64 value to a trace_min_max_param struct
+ * @filp: The active open file structure
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write
+ * @ppos: The current "file" position
+ *
+ * This function implements the write interface for a struct trace_min_max_param.
+ * The filp->private_data must point to a trace_min_max_param structure that
+ * defines where to write the value, the min and the max acceptable values,
+ * and a lock to protect the write.
+ */
+static ssize_t
+trace_min_max_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_min_max_param *param = filp->private_data;
+ u64 val;
+ int err;
+
+ if (!param)
+ return -EFAULT;
+
+ err = kstrtoull_from_user(ubuf, cnt, 10, &val);
+ if (err)
+ return err;
+
+ if (param->lock)
+ mutex_lock(param->lock);
+
+ if (param->min && val < *param->min)
+ err = -EINVAL;
+
+ if (param->max && val > *param->max)
+ err = -EINVAL;
+
+ if (!err)
+ *param->val = val;
+
+ if (param->lock)
+ mutex_unlock(param->lock);
+
+ if (err)
+ return err;
+
+ return cnt;
+}
+
+/*
+ * trace_min_max_read - Read a u64 value from a trace_min_max_param struct
+ * @filp: The active open file structure
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function implements the read interface for a struct trace_min_max_param.
+ * The filp->private_data must point to a trace_min_max_param struct with valid
+ * data.
+ */
+static ssize_t
+trace_min_max_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct trace_min_max_param *param = filp->private_data;
+ char buf[U64_STR_SIZE];
+ int len;
+ u64 val;
+
+ if (!param)
+ return -EFAULT;
+
+ val = *param->val;
+
+ if (cnt > sizeof(buf))
+ cnt = sizeof(buf);
+
+ len = snprintf(buf, sizeof(buf), "%llu\n", val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+const struct file_operations trace_min_max_fops = {
+ .open = tracing_open_generic,
+ .read = trace_min_max_read,
+ .write = trace_min_max_write,
+};
+
#define TRACING_LOG_ERRS_MAX 8
#define TRACING_LOG_LOC_MAX 128
@@ -9532,6 +9679,8 @@ static __init int tracer_init_tracefs(void)
return 0;
}
+fs_initcall(tracer_init_tracefs);
+
static int trace_panic_handler(struct notifier_block *this,
unsigned long event, void *unused)
{
@@ -9952,7 +10101,7 @@ void __init trace_init(void)
trace_event_init();
}
-__init static int clear_boot_tracer(void)
+__init static void clear_boot_tracer(void)
{
/*
* The default tracer at boot buffer is an init section.
@@ -9962,26 +10111,21 @@ __init static int clear_boot_tracer(void)
* about to be freed.
*/
if (!default_bootup_tracer)
- return 0;
+ return;
printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
default_bootup_tracer);
default_bootup_tracer = NULL;
-
- return 0;
}
-fs_initcall(tracer_init_tracefs);
-late_initcall_sync(clear_boot_tracer);
-
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
-__init static int tracing_set_default_clock(void)
+__init static void tracing_set_default_clock(void)
{
/* sched_clock_stable() is determined in late_initcall */
if (!trace_boot_clock && !sched_clock_stable()) {
if (security_locked_down(LOCKDOWN_TRACEFS)) {
pr_warn("Can not set tracing clock due to lockdown\n");
- return -EPERM;
+ return;
}
printk(KERN_WARNING
@@ -9991,8 +10135,21 @@ __init static int tracing_set_default_clock(void)
"on the kernel command line\n");
tracing_set_clock(&global_trace, "global");
}
+}
+#else
+static inline void tracing_set_default_clock(void) { }
+#endif
+
+__init static int late_trace_init(void)
+{
+ if (tracepoint_printk && tracepoint_printk_stop_on_boot) {
+ static_key_disable(&tracepoint_printk_key.key);
+ tracepoint_printk = 0;
+ }
+ tracing_set_default_clock();
+ clear_boot_tracer();
return 0;
}
-late_initcall_sync(tracing_set_default_clock);
-#endif
+
+late_initcall_sync(late_trace_init);
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index d5d8c088a55d..a180abf76d4e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -45,6 +45,8 @@ enum trace_type {
TRACE_BLK,
TRACE_BPUTS,
TRACE_HWLAT,
+ TRACE_OSNOISE,
+ TRACE_TIMERLAT,
TRACE_RAW_DATA,
TRACE_FUNC_REPEATS,
@@ -290,7 +292,8 @@ struct trace_array {
struct array_buffer max_buffer;
bool allocated_snapshot;
#endif
-#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
+#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+ || defined(CONFIG_OSNOISE_TRACER)
unsigned long max_latency;
#ifdef CONFIG_FSNOTIFY
struct dentry *d_max_latency;
@@ -438,6 +441,8 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
IF_ASSIGN(var, ent, struct hwlat_entry, TRACE_HWLAT); \
+ IF_ASSIGN(var, ent, struct osnoise_entry, TRACE_OSNOISE);\
+ IF_ASSIGN(var, ent, struct timerlat_entry, TRACE_TIMERLAT);\
IF_ASSIGN(var, ent, struct raw_data_entry, TRACE_RAW_DATA);\
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
TRACE_MMIO_RW); \
@@ -668,15 +673,15 @@ void update_max_tr_single(struct trace_array *tr,
struct task_struct *tsk, int cpu);
#endif /* CONFIG_TRACER_MAX_TRACE */
-#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)) && \
- defined(CONFIG_FSNOTIFY)
+#if (defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER) \
+ || defined(CONFIG_OSNOISE_TRACER)) && defined(CONFIG_FSNOTIFY)
+#define LATENCY_FS_NOTIFY
+#endif
+#ifdef LATENCY_FS_NOTIFY
void latency_fsnotify(struct trace_array *tr);
-
#else
-
static inline void latency_fsnotify(struct trace_array *tr) { }
-
#endif
#ifdef CONFIG_STACKTRACE
@@ -1945,4 +1950,22 @@ static inline bool is_good_name(const char *name)
return true;
}
+/*
+ * This is a generic way to read and write a u64 value from a file in tracefs.
+ *
+ * The value is stored in the variable pointed to by *val. The value needs
+ * to be at least *min and at most *max. The write is protected by an
+ * existing *lock.
+ */
+struct trace_min_max_param {
+ struct mutex *lock;
+ u64 *val;
+ u64 *min;
+ u64 *max;
+};
+
+#define U64_STR_SIZE 24 /* 20 digits max */
+
+extern const struct file_operations trace_min_max_fops;
+
#endif /* _LINUX_KERNEL_TRACE_H */
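
Later in this patch, trace_hwlat.c switches its "width" and "window" files over to this generic interface, with each value bounding the other. A rough interactive sketch of the resulting behavior (not part of the patch; it assumes the hwlat files sit under a "hwlat_detector" directory in tracefs and that the default 1s window / 0.5s width are in place):

    cd /sys/kernel/tracing/hwlat_detector
    cat window width            # 1000000 and 500000 by default
    echo 2000000 > width        # rejected: width may not exceed window
    # the shell reports something like "write error: Invalid argument"

The rejection comes from trace_min_max_write(), which takes the shared lock and returns -EINVAL whenever the new value falls outside the [*min, *max] bounds.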
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
index a82f03f385f8..94ef2d099e32 100644
--- a/kernel/trace/trace_boot.c
+++ b/kernel/trace/trace_boot.c
@@ -225,14 +225,37 @@ static void __init
trace_boot_init_events(struct trace_array *tr, struct xbc_node *node)
{
struct xbc_node *gnode, *enode;
+ bool enable, enable_all = false;
+ const char *data;
node = xbc_node_find_child(node, "event");
if (!node)
return;
/* per-event key starts with "event.GROUP.EVENT" */
- xbc_node_for_each_child(node, gnode)
- xbc_node_for_each_child(gnode, enode)
+ xbc_node_for_each_child(node, gnode) {
+ data = xbc_node_get_data(gnode);
+ if (!strcmp(data, "enable")) {
+ enable_all = true;
+ continue;
+ }
+ enable = false;
+ xbc_node_for_each_child(gnode, enode) {
+ data = xbc_node_get_data(enode);
+ if (!strcmp(data, "enable")) {
+ enable = true;
+ continue;
+ }
trace_boot_init_one_event(tr, gnode, enode);
+ }
+ /* Event enablement must be done after event settings */
+ if (enable) {
+ data = xbc_node_get_data(gnode);
+ trace_array_set_clr_event(tr, data, NULL, true);
+ }
+ }
+ /* Ditto */
+ if (enable_all)
+ trace_array_set_clr_event(tr, NULL, NULL, true);
}
#else
#define trace_boot_enable_events(tr, node) do {} while (0)
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 251c819cf0c5..cd41e863b51c 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -360,3 +360,44 @@ FTRACE_ENTRY(func_repeats, func_repeats_entry,
__entry->count,
FUNC_REPEATS_GET_DELTA_TS(__entry))
);
+
+FTRACE_ENTRY(osnoise, osnoise_entry,
+
+ TRACE_OSNOISE,
+
+ F_STRUCT(
+ __field( u64, noise )
+ __field( u64, runtime )
+ __field( u64, max_sample )
+ __field( unsigned int, hw_count )
+ __field( unsigned int, nmi_count )
+ __field( unsigned int, irq_count )
+ __field( unsigned int, softirq_count )
+ __field( unsigned int, thread_count )
+ ),
+
+ F_printk("noise:%llu\tmax_sample:%llu\thw:%u\tnmi:%u\tirq:%u\tsoftirq:%u\tthread:%u\n",
+ __entry->noise,
+ __entry->max_sample,
+ __entry->hw_count,
+ __entry->nmi_count,
+ __entry->irq_count,
+ __entry->softirq_count,
+ __entry->thread_count)
+);
+
+FTRACE_ENTRY(timerlat, timerlat_entry,
+
+ TRACE_TIMERLAT,
+
+ F_STRUCT(
+ __field( unsigned int, seqnum )
+ __field( int, context )
+ __field( u64, timer_latency )
+ ),
+
+ F_printk("seq:%u\tcontext:%d\ttimer_latency:%llu\n",
+ __entry->seqnum,
+ __entry->context,
+ __entry->timer_latency)
+);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index c1abd63f1d6c..ba03b7d84fc2 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -2434,12 +2434,12 @@ create_field_var_hist(struct hist_trigger_data *target_hist_data,
char *subsys_name, char *event_name, char *field_name)
{
struct trace_array *tr = target_hist_data->event_file->tr;
- struct hist_field *event_var = ERR_PTR(-EINVAL);
struct hist_trigger_data *hist_data;
unsigned int i, n, first = true;
struct field_var_hist *var_hist;
struct trace_event_file *file;
struct hist_field *key_field;
+ struct hist_field *event_var;
char *saved_filter;
char *cmd;
int ret;
@@ -5232,6 +5232,7 @@ static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
cmd = hist_data->field_var_hists[i]->cmd;
ret = event_hist_trigger_func(&trigger_hist_cmd, file,
"!hist", "hist", cmd);
+ WARN_ON_ONCE(ret < 0);
}
}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index b8bfa8505b7b..cf84d0f6583a 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -916,7 +916,8 @@ void unpause_named_trigger(struct event_trigger_data *data)
/**
* set_named_trigger_data - Associate common named trigger data
- * @data: The trigger data of a named trigger to unpause
+ * @data: The trigger data to associate
+ * @named_data: The common named trigger to be associated
*
* Named triggers are sets of triggers that share a common set of
* trigger data. The first named trigger registered with a given name
diff --git a/kernel/trace/trace_hwlat.c b/kernel/trace/trace_hwlat.c
index 632ef88131a9..a6c0cdaf4b87 100644
--- a/kernel/trace/trace_hwlat.c
+++ b/kernel/trace/trace_hwlat.c
@@ -34,7 +34,7 @@
* Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
* Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
*
- * Includes useful feedback from Clark Williams <clark@redhat.com>
+ * Includes useful feedback from Clark Williams <williams@redhat.com>
*
*/
#include <linux/kthread.h>
@@ -54,20 +54,33 @@ static struct trace_array *hwlat_trace;
#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
-/* sampling thread*/
-static struct task_struct *hwlat_kthread;
-
static struct dentry *hwlat_sample_width; /* sample width us */
static struct dentry *hwlat_sample_window; /* sample window us */
+static struct dentry *hwlat_thread_mode; /* hwlat thread mode */
+
+enum {
+ MODE_NONE = 0,
+ MODE_ROUND_ROBIN,
+ MODE_PER_CPU,
+ MODE_MAX
+};
+static char *thread_mode_str[] = { "none", "round-robin", "per-cpu" };
/* Save the previous tracing_thresh value */
static unsigned long save_tracing_thresh;
-/* NMI timestamp counters */
-static u64 nmi_ts_start;
-static u64 nmi_total_ts;
-static int nmi_count;
-static int nmi_cpu;
+/* runtime kthread data */
+struct hwlat_kthread_data {
+ struct task_struct *kthread;
+ /* NMI timestamp counters */
+ u64 nmi_ts_start;
+ u64 nmi_total_ts;
+ int nmi_count;
+ int nmi_cpu;
+};
+
+struct hwlat_kthread_data hwlat_single_cpu_data;
+DEFINE_PER_CPU(struct hwlat_kthread_data, hwlat_per_cpu_data);
/* Tells NMIs to call back to the hwlat tracer to record timestamps */
bool trace_hwlat_callback_enabled;
@@ -96,11 +109,24 @@ static struct hwlat_data {
u64 sample_window; /* total sampling window (on+off) */
u64 sample_width; /* active sampling portion of window */
+ int thread_mode; /* thread mode */
+
} hwlat_data = {
.sample_window = DEFAULT_SAMPLE_WINDOW,
.sample_width = DEFAULT_SAMPLE_WIDTH,
+ .thread_mode = MODE_ROUND_ROBIN
};
+static struct hwlat_kthread_data *get_cpu_data(void)
+{
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ return this_cpu_ptr(&hwlat_per_cpu_data);
+ else
+ return &hwlat_single_cpu_data;
+}
+
+static bool hwlat_busy;
+
static void trace_hwlat_sample(struct hwlat_sample *sample)
{
struct trace_array *tr = hwlat_trace;
@@ -136,7 +162,9 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
void trace_hwlat_callback(bool enter)
{
- if (smp_processor_id() != nmi_cpu)
+ struct hwlat_kthread_data *kdata = get_cpu_data();
+
+ if (!kdata->kthread)
return;
/*
@@ -145,15 +173,24 @@ void trace_hwlat_callback(bool enter)
*/
if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
if (enter)
- nmi_ts_start = time_get();
+ kdata->nmi_ts_start = time_get();
else
- nmi_total_ts += time_get() - nmi_ts_start;
+ kdata->nmi_total_ts += time_get() - kdata->nmi_ts_start;
}
if (enter)
- nmi_count++;
+ kdata->nmi_count++;
}
+/*
+ * hwlat_err - report a hwlat error.
+ */
+#define hwlat_err(msg) ({ \
+ struct trace_array *tr = hwlat_trace; \
+ \
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \
+})
+
/**
* get_sample - sample the CPU TSC and look for likely hardware latencies
*
@@ -163,6 +200,7 @@ void trace_hwlat_callback(bool enter)
*/
static int get_sample(void)
{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
struct trace_array *tr = hwlat_trace;
struct hwlat_sample s;
time_type start, t1, t2, last_t2;
@@ -175,9 +213,8 @@ static int get_sample(void)
do_div(thresh, NSEC_PER_USEC); /* modifies interval value */
- nmi_cpu = smp_processor_id();
- nmi_total_ts = 0;
- nmi_count = 0;
+ kdata->nmi_total_ts = 0;
+ kdata->nmi_count = 0;
/* Make sure NMIs see this first */
barrier();
@@ -197,7 +234,7 @@ static int get_sample(void)
outer_diff = time_to_us(time_sub(t1, last_t2));
/* This shouldn't happen */
if (outer_diff < 0) {
- pr_err(BANNER "time running backwards\n");
+ hwlat_err(BANNER "time running backwards\n");
goto out;
}
if (outer_diff > outer_sample)
@@ -209,7 +246,7 @@ static int get_sample(void)
/* Check for possible overflows */
if (total < last_total) {
- pr_err("Time total overflowed\n");
+ hwlat_err("Time total overflowed\n");
break;
}
last_total = total;
@@ -225,7 +262,7 @@ static int get_sample(void)
/* This shouldn't happen */
if (diff < 0) {
- pr_err(BANNER "time running backwards\n");
+ hwlat_err(BANNER "time running backwards\n");
goto out;
}
@@ -247,15 +284,15 @@ static int get_sample(void)
ret = 1;
/* We read in microseconds */
- if (nmi_total_ts)
- do_div(nmi_total_ts, NSEC_PER_USEC);
+ if (kdata->nmi_total_ts)
+ do_div(kdata->nmi_total_ts, NSEC_PER_USEC);
hwlat_data.count++;
s.seqnum = hwlat_data.count;
s.duration = sample;
s.outer_duration = outer_sample;
- s.nmi_total_ts = nmi_total_ts;
- s.nmi_count = nmi_count;
+ s.nmi_total_ts = kdata->nmi_total_ts;
+ s.nmi_count = kdata->nmi_count;
s.count = count;
trace_hwlat_sample(&s);
@@ -273,7 +310,6 @@ out:
}
static struct cpumask save_cpumask;
-static bool disable_migrate;
static void move_to_next_cpu(void)
{
@@ -281,15 +317,13 @@ static void move_to_next_cpu(void)
struct trace_array *tr = hwlat_trace;
int next_cpu;
- if (disable_migrate)
- return;
/*
* If for some reason the user modifies the CPU affinity
* of this thread, then stop migrating for the duration
* of the current test.
*/
if (!cpumask_equal(current_mask, current->cpus_ptr))
- goto disable;
+ goto change_mode;
get_online_cpus();
cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
@@ -300,7 +334,7 @@ static void move_to_next_cpu(void)
next_cpu = cpumask_first(current_mask);
if (next_cpu >= nr_cpu_ids) /* Shouldn't happen! */
- goto disable;
+ goto change_mode;
cpumask_clear(current_mask);
cpumask_set_cpu(next_cpu, current_mask);
@@ -308,8 +342,9 @@ static void move_to_next_cpu(void)
sched_setaffinity(0, current_mask);
return;
- disable:
- disable_migrate = true;
+ change_mode:
+ hwlat_data.thread_mode = MODE_NONE;
+ pr_info(BANNER "cpumask changed while in round-robin mode, switching to mode none\n");
}
/*
@@ -328,7 +363,8 @@ static int kthread_fn(void *data)
while (!kthread_should_stop()) {
- move_to_next_cpu();
+ if (hwlat_data.thread_mode == MODE_ROUND_ROBIN)
+ move_to_next_cpu();
local_irq_disable();
get_sample();
@@ -351,178 +387,380 @@ static int kthread_fn(void *data)
return 0;
}
-/**
- * start_kthread - Kick off the hardware latency sampling/detector kthread
+/*
+ * stop_single_kthread - Inform the hardware latency sampling/detector kthread to stop
+ *
+ * This kicks the running hardware latency sampling/detector kernel thread and
+ * tells it to stop sampling now. Use this on unload and at system shutdown.
+ */
+static void stop_single_kthread(void)
+{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
+ struct task_struct *kthread;
+
+ get_online_cpus();
+ kthread = kdata->kthread;
+
+ if (!kthread)
+ goto out_put_cpus;
+
+ kthread_stop(kthread);
+ kdata->kthread = NULL;
+
+out_put_cpus:
+ put_online_cpus();
+}
+
+
+/*
+ * start_single_kthread - Kick off the hardware latency sampling/detector kthread
*
* This starts the kernel thread that will sit and sample the CPU timestamp
* counter (TSC or similar) and look for potential hardware latencies.
*/
-static int start_kthread(struct trace_array *tr)
+static int start_single_kthread(struct trace_array *tr)
{
+ struct hwlat_kthread_data *kdata = get_cpu_data();
struct cpumask *current_mask = &save_cpumask;
struct task_struct *kthread;
int next_cpu;
- if (hwlat_kthread)
- return 0;
-
- /* Just pick the first CPU on first iteration */
get_online_cpus();
- cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
- put_online_cpus();
- next_cpu = cpumask_first(current_mask);
+ if (kdata->kthread)
+ goto out_put_cpus;
kthread = kthread_create(kthread_fn, NULL, "hwlatd");
if (IS_ERR(kthread)) {
pr_err(BANNER "could not start sampling thread\n");
+ put_online_cpus();
return -ENOMEM;
}
- cpumask_clear(current_mask);
- cpumask_set_cpu(next_cpu, current_mask);
+ /* Just pick the first CPU on first iteration */
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+
+ if (hwlat_data.thread_mode == MODE_ROUND_ROBIN) {
+ next_cpu = cpumask_first(current_mask);
+ cpumask_clear(current_mask);
+ cpumask_set_cpu(next_cpu, current_mask);
+
+ }
+
sched_setaffinity(kthread->pid, current_mask);
- hwlat_kthread = kthread;
+ kdata->kthread = kthread;
wake_up_process(kthread);
+out_put_cpus:
+ put_online_cpus();
return 0;
}
-/**
- * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
+/*
+ * stop_cpu_kthread - Stop a hwlat cpu kthread
+ */
+static void stop_cpu_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+
+ kthread = per_cpu(hwlat_per_cpu_data, cpu).kthread;
+ if (kthread)
+ kthread_stop(kthread);
+ per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
+}
+
+/*
+ * stop_per_cpu_kthreads - Inform the hardware latency sampling/detector kthread to stop
*
- * This kicks the running hardware latency sampling/detector kernel thread and
+ * This kicks the running hardware latency sampling/detector kernel threads and
* tells it to stop sampling now. Use this on unload and at system shutdown.
*/
-static void stop_kthread(void)
+static void stop_per_cpu_kthreads(void)
{
- if (!hwlat_kthread)
- return;
- kthread_stop(hwlat_kthread);
- hwlat_kthread = NULL;
+ unsigned int cpu;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ stop_cpu_kthread(cpu);
+ put_online_cpus();
}
/*
- * hwlat_read - Wrapper read function for reading both window and width
- * @filp: The active open file structure
- * @ubuf: The userspace provided buffer to read value into
- * @cnt: The maximum number of bytes to read
- * @ppos: The current "file" position
- *
- * This function provides a generic read implementation for the global state
- * "hwlat_data" structure filesystem entries.
+ * start_cpu_kthread - Start a hwlat cpu kthread
*/
-static ssize_t hwlat_read(struct file *filp, char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int start_cpu_kthread(unsigned int cpu)
{
- char buf[U64STR_SIZE];
- u64 *entry = filp->private_data;
- u64 val;
- int len;
+ struct task_struct *kthread;
+ char comm[24];
- if (!entry)
- return -EFAULT;
+ snprintf(comm, 24, "hwlatd/%d", cpu);
+
+ kthread = kthread_create_on_cpu(kthread_fn, NULL, cpu, comm);
+ if (IS_ERR(kthread)) {
+ pr_err(BANNER "could not start sampling thread\n");
+ return -ENOMEM;
+ }
- if (cnt > sizeof(buf))
- cnt = sizeof(buf);
+ per_cpu(hwlat_per_cpu_data, cpu).kthread = kthread;
+ wake_up_process(kthread);
+
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void hwlat_hotplug_workfn(struct work_struct *dummy)
+{
+ struct trace_array *tr = hwlat_trace;
+ unsigned int cpu = smp_processor_id();
+
+ mutex_lock(&trace_types_lock);
+ mutex_lock(&hwlat_data.lock);
+ get_online_cpus();
+
+ if (!hwlat_busy || hwlat_data.thread_mode != MODE_PER_CPU)
+ goto out_unlock;
- val = *entry;
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
+ goto out_unlock;
- len = snprintf(buf, sizeof(buf), "%llu\n", val);
+ start_cpu_kthread(cpu);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+out_unlock:
+ put_online_cpus();
+ mutex_unlock(&hwlat_data.lock);
+ mutex_unlock(&trace_types_lock);
}
-/**
- * hwlat_width_write - Write function for "width" entry
- * @filp: The active open file structure
- * @ubuf: The user buffer that contains the value to write
- * @cnt: The maximum number of bytes to write to "file"
- * @ppos: The current position in @file
+static DECLARE_WORK(hwlat_hotplug_work, hwlat_hotplug_workfn);
+
+/*
+ * hwlat_cpu_init - CPU hotplug online callback function
+ */
+static int hwlat_cpu_init(unsigned int cpu)
+{
+ schedule_work_on(cpu, &hwlat_hotplug_work);
+ return 0;
+}
+
+/*
+ * hwlat_cpu_die - CPU hotplug offline callback function
+ */
+static int hwlat_cpu_die(unsigned int cpu)
+{
+ stop_cpu_kthread(cpu);
+ return 0;
+}
+
+static void hwlat_init_hotplug_support(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/hwlat:online",
+ hwlat_cpu_init, hwlat_cpu_die);
+ if (ret < 0)
+ pr_warn(BANNER "Failed to init cpu hotplug support\n");
+
+ return;
+}
+#else /* CONFIG_HOTPLUG_CPU */
+static void hwlat_init_hotplug_support(void)
+{
+ return;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * start_per_cpu_kthreads - Kick off the hardware latency sampling/detector kthreads
*
- * This function provides a write implementation for the "width" interface
- * to the hardware latency detector. It can be used to configure
- * for how many us of the total window us we will actively sample for any
- * hardware-induced latency periods. Obviously, it is not possible to
- * sample constantly and have the system respond to a sample reader, or,
- * worse, without having the system appear to have gone out to lunch. It
- * is enforced that width is less that the total window size.
+ * This starts the kernel threads that will sit on potentially all cpus and
+ * sample the CPU timestamp counter (TSC or similar) and look for potential
+ * hardware latencies.
*/
-static ssize_t
-hwlat_width_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static int start_per_cpu_kthreads(struct trace_array *tr)
{
- u64 val;
- int err;
+ struct cpumask *current_mask = &save_cpumask;
+ unsigned int cpu;
+ int retval;
- err = kstrtoull_from_user(ubuf, cnt, 10, &val);
- if (err)
- return err;
+ get_online_cpus();
+ /*
+ * Run only on CPUs in which hwlat is allowed to run.
+ */
+ cpumask_and(current_mask, cpu_online_mask, tr->tracing_cpumask);
+
+ for_each_online_cpu(cpu)
+ per_cpu(hwlat_per_cpu_data, cpu).kthread = NULL;
+
+ for_each_cpu(cpu, current_mask) {
+ retval = start_cpu_kthread(cpu);
+ if (retval)
+ goto out_error;
+ }
+ put_online_cpus();
+
+ return 0;
+
+out_error:
+ put_online_cpus();
+ stop_per_cpu_kthreads();
+ return retval;
+}
+
+static void *s_mode_start(struct seq_file *s, loff_t *pos)
+{
+ int mode = *pos;
mutex_lock(&hwlat_data.lock);
- if (val < hwlat_data.sample_window)
- hwlat_data.sample_width = val;
+
+ if (mode >= MODE_MAX)
+ return NULL;
+
+ return pos;
+}
+
+static void *s_mode_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ int mode = ++(*pos);
+
+ if (mode >= MODE_MAX)
+ return NULL;
+
+ return pos;
+}
+
+static int s_mode_show(struct seq_file *s, void *v)
+{
+ loff_t *pos = v;
+ int mode = *pos;
+
+ if (mode == hwlat_data.thread_mode)
+ seq_printf(s, "[%s]", thread_mode_str[mode]);
else
- err = -EINVAL;
- mutex_unlock(&hwlat_data.lock);
+ seq_printf(s, "%s", thread_mode_str[mode]);
- if (err)
- return err;
+ if (mode != MODE_MAX)
+ seq_puts(s, " ");
+
+ return 0;
+}
- return cnt;
+static void s_mode_stop(struct seq_file *s, void *v)
+{
+ seq_puts(s, "\n");
+ mutex_unlock(&hwlat_data.lock);
}
+static const struct seq_operations thread_mode_seq_ops = {
+ .start = s_mode_start,
+ .next = s_mode_next,
+ .show = s_mode_show,
+ .stop = s_mode_stop
+};
+
+static int hwlat_mode_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &thread_mode_seq_ops);
+};
+
+static void hwlat_tracer_start(struct trace_array *tr);
+static void hwlat_tracer_stop(struct trace_array *tr);
+
/**
- * hwlat_window_write - Write function for "window" entry
+ * hwlat_mode_write - Write function for "mode" entry
* @filp: The active open file structure
* @ubuf: The user buffer that contains the value to write
* @cnt: The maximum number of bytes to write to "file"
* @ppos: The current position in @file
*
- * This function provides a write implementation for the "window" interface
- * to the hardware latency detector. The window is the total time
- * in us that will be considered one sample period. Conceptually, windows
- * occur back-to-back and contain a sample width period during which
- * actual sampling occurs. Can be used to write a new total window size. It
- * is enforced that any value written must be greater than the sample width
- * size, or an error results.
+ * This function provides a write implementation for the "mode" interface
+ * to the hardware latency detector. hwlatd has different operation modes.
+ * The "none" sets the allowed cpumask for a single hwlatd thread at the
+ * startup and lets the scheduler handle the migration. The default mode is
+ * the "round-robin" one, in which a single hwlatd thread runs, migrating
+ * among the allowed CPUs in a round-robin fashion. The "per-cpu" mode
+ * creates one hwlatd thread per allowed CPU.
*/
-static ssize_t
-hwlat_window_write(struct file *filp, const char __user *ubuf,
- size_t cnt, loff_t *ppos)
+static ssize_t hwlat_mode_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
{
- u64 val;
- int err;
+ struct trace_array *tr = hwlat_trace;
+ const char *mode;
+ char buf[64];
+ int ret, i;
- err = kstrtoull_from_user(ubuf, cnt, 10, &val);
- if (err)
- return err;
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ mode = strstrip(buf);
+
+ ret = -EINVAL;
+
+ /*
+ * trace_types_lock is taken to avoid concurrency on start/stop
+ * and hwlat_busy.
+ */
+ mutex_lock(&trace_types_lock);
+ if (hwlat_busy)
+ hwlat_tracer_stop(tr);
mutex_lock(&hwlat_data.lock);
- if (hwlat_data.sample_width < val)
- hwlat_data.sample_window = val;
- else
- err = -EINVAL;
+
+ for (i = 0; i < MODE_MAX; i++) {
+ if (strcmp(mode, thread_mode_str[i]) == 0) {
+ hwlat_data.thread_mode = i;
+ ret = cnt;
+ }
+ }
+
mutex_unlock(&hwlat_data.lock);
- if (err)
- return err;
+ if (hwlat_busy)
+ hwlat_tracer_start(tr);
+ mutex_unlock(&trace_types_lock);
+
+ *ppos += cnt;
- return cnt;
+
+
+ return ret;
}
-static const struct file_operations width_fops = {
- .open = tracing_open_generic,
- .read = hwlat_read,
- .write = hwlat_width_write,
+/*
+ * The width parameter is read/write using the generic trace_min_max_param
+ * method. The *val is protected by the hwlat_data lock and is upper
+ * bounded by the window parameter.
+ */
+static struct trace_min_max_param hwlat_width = {
+ .lock = &hwlat_data.lock,
+ .val = &hwlat_data.sample_width,
+ .max = &hwlat_data.sample_window,
+ .min = NULL,
};
-static const struct file_operations window_fops = {
- .open = tracing_open_generic,
- .read = hwlat_read,
- .write = hwlat_window_write,
+/*
+ * The window parameter is read/write using the generic trace_min_max_param
+ * method. The *val is protected by the hwlat_data lock and is lower
+ * bounded by the width parameter.
+ */
+static struct trace_min_max_param hwlat_window = {
+ .lock = &hwlat_data.lock,
+ .val = &hwlat_data.sample_window,
+ .max = NULL,
+ .min = &hwlat_data.sample_width,
};
+static const struct file_operations thread_mode_fops = {
+ .open = hwlat_mode_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = hwlat_mode_write
+};
/**
* init_tracefs - A function to initialize the tracefs interface files
*
@@ -546,18 +784,25 @@ static int init_tracefs(void)
hwlat_sample_window = tracefs_create_file("window", 0640,
top_dir,
- &hwlat_data.sample_window,
- &window_fops);
+ &hwlat_window,
+ &trace_min_max_fops);
if (!hwlat_sample_window)
goto err;
hwlat_sample_width = tracefs_create_file("width", 0644,
top_dir,
- &hwlat_data.sample_width,
- &width_fops);
+ &hwlat_width,
+ &trace_min_max_fops);
if (!hwlat_sample_width)
goto err;
+ hwlat_thread_mode = trace_create_file("mode", 0644,
+ top_dir,
+ NULL,
+ &thread_mode_fops);
+ if (!hwlat_thread_mode)
+ goto err;
+
return 0;
err:
@@ -569,18 +814,22 @@ static void hwlat_tracer_start(struct trace_array *tr)
{
int err;
- err = start_kthread(tr);
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ err = start_per_cpu_kthreads(tr);
+ else
+ err = start_single_kthread(tr);
if (err)
pr_err(BANNER "Cannot start hwlat kthread\n");
}
static void hwlat_tracer_stop(struct trace_array *tr)
{
- stop_kthread();
+ if (hwlat_data.thread_mode == MODE_PER_CPU)
+ stop_per_cpu_kthreads();
+ else
+ stop_single_kthread();
}
-static bool hwlat_busy;
-
static int hwlat_tracer_init(struct trace_array *tr)
{
/* Only allow one instance to enable this */
@@ -589,7 +838,6 @@ static int hwlat_tracer_init(struct trace_array *tr)
hwlat_trace = tr;
- disable_migrate = false;
hwlat_data.count = 0;
tr->max_latency = 0;
save_tracing_thresh = tracing_thresh;
@@ -608,7 +856,7 @@ static int hwlat_tracer_init(struct trace_array *tr)
static void hwlat_tracer_reset(struct trace_array *tr)
{
- stop_kthread();
+ hwlat_tracer_stop(tr);
/* the tracing threshold is static between runs */
last_tracing_thresh = tracing_thresh;
@@ -637,6 +885,8 @@ __init static int init_hwlat_tracer(void)
if (ret)
return ret;
+ hwlat_init_hotplug_support();
+
init_tracefs();
return 0;
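
A short sketch of the new "mode" interface added above (not part of the patch; the path assumes the same "hwlat_detector" tracefs directory used for width/window):

    cat /sys/kernel/tracing/hwlat_detector/mode
    # -> none [round-robin] per-cpu      (brackets mark the current mode)
    echo per-cpu > /sys/kernel/tracing/hwlat_detector/mode
    cat /sys/kernel/tracing/hwlat_detector/mode
    # -> none round-robin [per-cpu]

Per hwlat_mode_write(), switching modes while hwlat is busy stops the tracer, updates hwlat_data.thread_mode under hwlat_data.lock, and restarts it, so "per-cpu" immediately spawns one hwlatd/N thread per allowed CPU.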
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
new file mode 100644
index 000000000000..a7e3c24dee13
--- /dev/null
+++ b/kernel/trace/trace_osnoise.c
@@ -0,0 +1,2059 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * OS Noise Tracer: computes the OS Noise suffered by a running thread.
+ * Timerlat Tracer: measures the wakeup latency of a timer triggered IRQ and thread.
+ *
+ * Based on "hwlat_detector" tracer by:
+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
+ * Copyright (C) 2013-2016 Steven Rostedt, Red Hat, Inc. <srostedt@redhat.com>
+ * With feedback from Clark Williams <williams@redhat.com>
+ *
+ * And also based on the rtsl tracer presented on:
+ * DE OLIVEIRA, Daniel Bristot, et al. Demystifying the real-time linux
+ * scheduling latency. In: 32nd Euromicro Conference on Real-Time Systems
+ * (ECRTS 2020). Schloss Dagstuhl-Leibniz-Zentrum fur Informatik, 2020.
+ *
+ * Copyright (C) 2021 Daniel Bristot de Oliveira, Red Hat, Inc. <bristot@redhat.com>
+ */
+
+#include <linux/kthread.h>
+#include <linux/tracefs.h>
+#include <linux/uaccess.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/sched/clock.h>
+#include <uapi/linux/sched/types.h>
+#include <linux/sched.h>
+#include "trace.h"
+
+#ifdef CONFIG_X86_LOCAL_APIC
+#include <asm/trace/irq_vectors.h>
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#include <trace/events/irq.h>
+#include <trace/events/sched.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/osnoise.h>
+
+static struct trace_array *osnoise_trace;
+
+/*
+ * Default values.
+ */
+#define BANNER "osnoise: "
+#define DEFAULT_SAMPLE_PERIOD 1000000 /* 1s */
+#define DEFAULT_SAMPLE_RUNTIME 1000000 /* 1s */
+
+#define DEFAULT_TIMERLAT_PERIOD 1000 /* 1ms */
+#define DEFAULT_TIMERLAT_PRIO 95 /* FIFO 95 */
+
+/*
+ * NMI runtime info.
+ */
+struct osn_nmi {
+ u64 count;
+ u64 delta_start;
+};
+
+/*
+ * IRQ runtime info.
+ */
+struct osn_irq {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+#define IRQ_CONTEXT 0
+#define THREAD_CONTEXT 1
+/*
+ * softirq runtime info.
+ */
+struct osn_softirq {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+/*
+ * thread runtime info.
+ */
+struct osn_thread {
+ u64 count;
+ u64 arrival_time;
+ u64 delta_start;
+};
+
+/*
+ * Runtime information: this structure saves the runtime information used by
+ * one sampling thread.
+ */
+struct osnoise_variables {
+ struct task_struct *kthread;
+ bool sampling;
+ pid_t pid;
+ struct osn_nmi nmi;
+ struct osn_irq irq;
+ struct osn_softirq softirq;
+ struct osn_thread thread;
+ local_t int_counter;
+};
+
+/*
+ * Per-cpu runtime information.
+ */
+DEFINE_PER_CPU(struct osnoise_variables, per_cpu_osnoise_var);
+
+/*
+ * this_cpu_osn_var - Return the per-cpu osnoise_variables on its relative CPU
+ */
+static inline struct osnoise_variables *this_cpu_osn_var(void)
+{
+ return this_cpu_ptr(&per_cpu_osnoise_var);
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * Runtime information for the timer mode.
+ */
+struct timerlat_variables {
+ struct task_struct *kthread;
+ struct hrtimer timer;
+ u64 rel_period;
+ u64 abs_period;
+ bool tracing_thread;
+ u64 count;
+};
+
+DEFINE_PER_CPU(struct timerlat_variables, per_cpu_timerlat_var);
+
+/*
+ * this_cpu_tmr_var - Return the per-cpu timerlat_variables on its relative CPU
+ */
+static inline struct timerlat_variables *this_cpu_tmr_var(void)
+{
+ return this_cpu_ptr(&per_cpu_timerlat_var);
+}
+
+/*
+ * tlat_var_reset - Reset the values of the given timerlat_variables
+ */
+static inline void tlat_var_reset(void)
+{
+ struct timerlat_variables *tlat_var;
+ int cpu;
+ /*
+ * So far, all the values are initialized as 0, so
+ * zeroing the structure is perfect.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ tlat_var = per_cpu_ptr(&per_cpu_timerlat_var, cpu);
+ memset(tlat_var, 0, sizeof(*tlat_var));
+ }
+}
+#else /* CONFIG_TIMERLAT_TRACER */
+#define tlat_var_reset() do {} while (0)
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * osn_var_reset - Reset the values of the given osnoise_variables
+ */
+static inline void osn_var_reset(void)
+{
+ struct osnoise_variables *osn_var;
+ int cpu;
+
+ /*
+ * So far, all the values are initialized as 0, so
+ * zeroing the structure is perfect.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ osn_var = per_cpu_ptr(&per_cpu_osnoise_var, cpu);
+ memset(osn_var, 0, sizeof(*osn_var));
+ }
+}
+
+/*
+ * osn_var_reset_all - Reset the value of all per-cpu osnoise_variables
+ */
+static inline void osn_var_reset_all(void)
+{
+ osn_var_reset();
+ tlat_var_reset();
+}
+
+/*
+ * Tells NMIs to call back to the osnoise tracer to record timestamps.
+ */
+bool trace_osnoise_callback_enabled;
+
+/*
+ * osnoise sample structure definition. Used to store the statistics of a
+ * sample run.
+ */
+struct osnoise_sample {
+ u64 runtime; /* runtime */
+ u64 noise; /* noise */
+ u64 max_sample; /* max single noise sample */
+ int hw_count; /* # HW (incl. hypervisor) interference */
+ int nmi_count; /* # NMIs during this sample */
+ int irq_count; /* # IRQs during this sample */
+ int softirq_count; /* # softirqs during this sample */
+ int thread_count; /* # threads during this sample */
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * timerlat sample structure definition. Used to store the statistics of
+ * a sample run.
+ */
+struct timerlat_sample {
+ u64 timer_latency; /* timer_latency */
+ unsigned int seqnum; /* unique sequence */
+ int context; /* timer context */
+};
+#endif
+
+/*
+ * Protect the interface.
+ */
+struct mutex interface_lock;
+
+/*
+ * Tracer data.
+ */
+static struct osnoise_data {
+ u64 sample_period; /* total sampling period */
+ u64 sample_runtime; /* active sampling portion of period */
+ u64 stop_tracing; /* stop trace in the internal operation (loop/irq) */
+ u64 stop_tracing_total; /* stop trace in the final operation (report/thread) */
+#ifdef CONFIG_TIMERLAT_TRACER
+ u64 timerlat_period; /* timerlat period */
+ u64 print_stack; /* print IRQ stack if total > */
+ int timerlat_tracer; /* timerlat tracer */
+#endif
+ bool tainted; /* inform users and developers about a problem */
+} osnoise_data = {
+ .sample_period = DEFAULT_SAMPLE_PERIOD,
+ .sample_runtime = DEFAULT_SAMPLE_RUNTIME,
+ .stop_tracing = 0,
+ .stop_tracing_total = 0,
+#ifdef CONFIG_TIMERLAT_TRACER
+ .print_stack = 0,
+ .timerlat_period = DEFAULT_TIMERLAT_PERIOD,
+ .timerlat_tracer = 0,
+#endif
+};
+
+/*
+ * Boolean variable used to inform that the tracer is currently sampling.
+ */
+static bool osnoise_busy;
+
+/*
+ * Print the osnoise header info.
+ */
+static void print_osnoise_headers(struct seq_file *s)
+{
+ if (osnoise_data.tainted)
+ seq_puts(s, "# osnoise is tainted!\n");
+
+ seq_puts(s, "# _-----=> irqs-off\n");
+ seq_puts(s, "# / _----=> need-resched\n");
+ seq_puts(s, "# | / _---=> hardirq/softirq\n");
+ seq_puts(s, "# || / _--=> preempt-depth ");
+ seq_puts(s, " MAX\n");
+
+ seq_puts(s, "# || / ");
+ seq_puts(s, " SINGLE Interference counters:\n");
+
+ seq_puts(s, "# |||| RUNTIME ");
+ seq_puts(s, " NOISE %% OF CPU NOISE +-----------------------------+\n");
+
+ seq_puts(s, "# TASK-PID CPU# |||| TIMESTAMP IN US ");
+ seq_puts(s, " IN US AVAILABLE IN US HW NMI IRQ SIRQ THREAD\n");
+
+ seq_puts(s, "# | | | |||| | | ");
+ seq_puts(s, " | | | | | | | |\n");
+}
+
+/*
+ * osnoise_taint - report an osnoise error.
+ */
+#define osnoise_taint(msg) ({ \
+ struct trace_array *tr = osnoise_trace; \
+ \
+ trace_array_printk_buf(tr->array_buffer.buffer, _THIS_IP_, msg); \
+ osnoise_data.tainted = true; \
+})
+
+/*
+ * Record an osnoise_sample into the tracer buffer.
+ */
+static void trace_osnoise_sample(struct osnoise_sample *sample)
+{
+ struct trace_array *tr = osnoise_trace;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct trace_event_call *call = &event_osnoise;
+ struct ring_buffer_event *event;
+ struct osnoise_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_OSNOISE, sizeof(*entry),
+ tracing_gen_ctx());
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->runtime = sample->runtime;
+ entry->noise = sample->noise;
+ entry->max_sample = sample->max_sample;
+ entry->hw_count = sample->hw_count;
+ entry->nmi_count = sample->nmi_count;
+ entry->irq_count = sample->irq_count;
+ entry->softirq_count = sample->softirq_count;
+ entry->thread_count = sample->thread_count;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * Print the timerlat header info.
+ */
+static void print_timerlat_headers(struct seq_file *s)
+{
+ seq_puts(s, "# _-----=> irqs-off\n");
+ seq_puts(s, "# / _----=> need-resched\n");
+ seq_puts(s, "# | / _---=> hardirq/softirq\n");
+ seq_puts(s, "# || / _--=> preempt-depth\n");
+ seq_puts(s, "# || /\n");
+ seq_puts(s, "# |||| ACTIVATION\n");
+ seq_puts(s, "# TASK-PID CPU# |||| TIMESTAMP ID ");
+ seq_puts(s, " CONTEXT LATENCY\n");
+ seq_puts(s, "# | | | |||| | | ");
+ seq_puts(s, " | |\n");
+}
+
+/*
+ * Record a timerlat_sample into the tracer buffer.
+ */
+static void trace_timerlat_sample(struct timerlat_sample *sample)
+{
+ struct trace_array *tr = osnoise_trace;
+ struct trace_event_call *call = &event_osnoise;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct ring_buffer_event *event;
+ struct timerlat_entry *entry;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_TIMERLAT, sizeof(*entry),
+ tracing_gen_ctx());
+ if (!event)
+ return;
+ entry = ring_buffer_event_data(event);
+ entry->seqnum = sample->seqnum;
+ entry->context = sample->context;
+ entry->timer_latency = sample->timer_latency;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+}
+
+#ifdef CONFIG_STACKTRACE
+
+#define MAX_CALLS 256
+
+/*
+ * Stack trace will take place only at IRQ level, so, no need
+ * to control nesting here.
+ */
+struct trace_stack {
+ int stack_size;
+ int nr_entries;
+ unsigned long calls[MAX_CALLS];
+};
+
+static DEFINE_PER_CPU(struct trace_stack, trace_stack);
+
+/*
+ * timerlat_save_stack - save a stack trace without printing
+ *
+ * Save the current stack trace without printing. The
+ * stack will be printed later, after the end of the measurement.
+ */
+static void timerlat_save_stack(int skip)
+{
+ unsigned int size, nr_entries;
+ struct trace_stack *fstack;
+
+ fstack = this_cpu_ptr(&trace_stack);
+
+ size = ARRAY_SIZE(fstack->calls);
+
+ nr_entries = stack_trace_save(fstack->calls, size, skip);
+
+ fstack->stack_size = nr_entries * sizeof(unsigned long);
+ fstack->nr_entries = nr_entries;
+
+ return;
+
+}
+/*
+ * timerlat_dump_stack - dump a stack trace previously saved
+ *
+ * Dump a saved stack trace into the trace buffer.
+ */
+static void timerlat_dump_stack(void)
+{
+ struct trace_event_call *call = &event_osnoise;
+ struct trace_array *tr = osnoise_trace;
+ struct trace_buffer *buffer = tr->array_buffer.buffer;
+ struct ring_buffer_event *event;
+ struct trace_stack *fstack;
+ struct stack_entry *entry;
+ unsigned int size;
+
+ preempt_disable_notrace();
+ fstack = this_cpu_ptr(&trace_stack);
+ size = fstack->stack_size;
+
+ event = trace_buffer_lock_reserve(buffer, TRACE_STACK, sizeof(*entry) + size,
+ tracing_gen_ctx());
+ if (!event)
+ goto out;
+
+ entry = ring_buffer_event_data(event);
+
+ memcpy(&entry->caller, fstack->calls, size);
+ entry->size = fstack->nr_entries;
+
+ if (!call_filter_check_discard(call, entry, buffer, event))
+ trace_buffer_unlock_commit_nostack(buffer, event);
+
+out:
+ preempt_enable_notrace();
+}
+#else
+#define timerlat_dump_stack() do {} while (0)
+#define timerlat_save_stack(a) do {} while (0)
+#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * Macros to encapsulate the time capturing infrastructure.
+ */
+#define time_get() trace_clock_local()
+#define time_to_us(x) div_u64(x, 1000)
+#define time_sub(a, b) ((a) - (b))
+
+/*
+ * cond_move_irq_delta_start - Forward the delta_start of a running IRQ
+ *
+ * If an IRQ is preempted by an NMI, its delta_start is pushed forward
+ * to discount the NMI interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_irq_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->irq.delta_start)
+ osn_var->irq.delta_start += duration;
+}
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * cond_move_softirq_delta_start - Forward the delta_start of a running softirq.
+ *
+ * If a softirq is preempted by an IRQ or NMI, its delta_start is pushed
+ * forward to discount the interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_softirq_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->softirq.delta_start)
+ osn_var->softirq.delta_start += duration;
+}
+#else /* CONFIG_PREEMPT_RT */
+#define cond_move_softirq_delta_start(osn_var, duration) do {} while (0)
+#endif
+
+/*
+ * cond_move_thread_delta_start - Forward the delta_start of a running thread
+ *
+ * If a noisy thread is preempted by a softirq, IRQ or NMI, its delta_start
+ * is pushed forward to discount the interference.
+ *
+ * See get_int_safe_duration().
+ */
+static inline void
+cond_move_thread_delta_start(struct osnoise_variables *osn_var, u64 duration)
+{
+ if (osn_var->thread.delta_start)
+ osn_var->thread.delta_start += duration;
+}
+
+/*
+ * get_int_safe_duration - Get the duration of a window
+ *
+ * The irq, softirq and thread variables need to have their duration without
+ * the interference from higher priority interrupts. Instead of keeping a
+ * variable to discount the interrupt interference from these variables, the
+ * starting time of these variables is pushed forward with the interrupt's
+ * duration. In this way, a single variable is used to:
+ *
+ * - Know if a given window is being measured.
+ * - Account its duration.
+ * - Discount the interference.
+ *
+ * To avoid getting inconsistent values, e.g.,:
+ *
+ * now = time_get()
+ * ---> interrupt!
+ * delta_start -= int duration;
+ * <---
+ * duration = now - delta_start;
+ *
+ * result: negative duration if the variable duration before the
+ * interrupt was smaller than the interrupt execution.
+ *
+ * A counter of interrupts is used. If the counter changed during the
+ * capture, the capture is retried until an interference-safe duration
+ * is obtained.
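+ *
+ * For example: a thread window starts with delta_start = 100 us. An IRQ
+ * then runs for 20 us; at its exit, cond_move_thread_delta_start() pushes
+ * the thread's delta_start forward to 120 us. If the window ends at
+ * 200 us, the reported duration is 200 - 120 = 80 us: the 100 us
+ * wall-clock window minus the 20 us of IRQ noise.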
+ */
+static inline s64
+get_int_safe_duration(struct osnoise_variables *osn_var, u64 *delta_start)
+{
+ u64 int_counter, now;
+ s64 duration;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ now = time_get();
+ duration = (now - *delta_start);
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ /*
+ * This is evidence of a race condition that caused a value
+ * to be "discounted" too much.
+ */
+ if (duration < 0)
+ osnoise_taint("Negative duration!\n");
+
+ *delta_start = 0;
+
+ return duration;
+}
+
+/*
+ * set_int_safe_time - Save the current time on *time, aware of interference
+ *
+ * Get the time, taking into consideration a possible interference from
+ * higher priority interrupts.
+ *
+ * See get_int_safe_duration() for an explanation.
+ */
+static u64
+set_int_safe_time(struct osnoise_variables *osn_var, u64 *time)
+{
+ u64 int_counter;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ *time = time_get();
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ return int_counter;
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * copy_int_safe_time - Copy *src into *dst, aware of interference
+ */
+static u64
+copy_int_safe_time(struct osnoise_variables *osn_var, u64 *dst, u64 *src)
+{
+ u64 int_counter;
+
+ do {
+ int_counter = local_read(&osn_var->int_counter);
+ /* synchronize with interrupts */
+ barrier();
+
+ *dst = *src;
+
+ /* synchronize with interrupts */
+ barrier();
+ } while (int_counter != local_read(&osn_var->int_counter));
+
+ return int_counter;
+}
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * trace_osnoise_callback - NMI entry/exit callback
+ *
+ * This function is called at NMI entry and exit. The bool enter
+ * distinguishes between the two cases. It is used to note an NMI
+ * occurrence, compute the noise caused by the NMI, and to discount the
+ * noise it potentially causes from the other interference variables.
+ */
+void trace_osnoise_callback(bool enter)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ u64 duration;
+
+ if (!osn_var->sampling)
+ return;
+
+ /*
+ * Currently trace_clock_local() calls sched_clock() and the
+ * generic version is not NMI safe.
+ */
+ if (!IS_ENABLED(CONFIG_GENERIC_SCHED_CLOCK)) {
+ if (enter) {
+ osn_var->nmi.delta_start = time_get();
+ local_inc(&osn_var->int_counter);
+ } else {
+ duration = time_get() - osn_var->nmi.delta_start;
+
+ trace_nmi_noise(osn_var->nmi.delta_start, duration);
+
+ cond_move_irq_delta_start(osn_var, duration);
+ cond_move_softirq_delta_start(osn_var, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+ }
+ }
+
+ if (enter)
+ osn_var->nmi.count++;
+}
+
+/*
+ * osnoise_trace_irq_entry - Note the starting of an IRQ
+ *
+ * Save the starting time of an IRQ. As IRQs are non-preemptive to other IRQs,
+ * it is safe to use a single variable (osn_var->irq) to save the statistics.
+ * The arrival_time is used to report... the arrival time. The delta_start
+ * is used to compute the duration at the IRQ exit handler. See
+ * cond_move_irq_delta_start().
+ */
+void osnoise_trace_irq_entry(int id)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+
+ if (!osn_var->sampling)
+ return;
+ /*
+ * This value will be used in the report, but not to compute
+ * the execution time, so it is safe to get it unsafe.
+ */
+ osn_var->irq.arrival_time = time_get();
+ set_int_safe_time(osn_var, &osn_var->irq.delta_start);
+ osn_var->irq.count++;
+
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * osnoise_trace_irq_exit - Note the end of an IRQ, save data and trace
+ *
+ * Computes the duration of the IRQ noise and traces it. It also discounts
+ * the interference from other sources of noise that could currently be
+ * accounted.
+ */
+void osnoise_trace_irq_exit(int id, const char *desc)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ int duration;
+
+ if (!osn_var->sampling)
+ return;
+
+ duration = get_int_safe_duration(osn_var, &osn_var->irq.delta_start);
+ trace_irq_noise(id, desc, osn_var->irq.arrival_time, duration);
+ osn_var->irq.arrival_time = 0;
+ cond_move_softirq_delta_start(osn_var, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+}
+
+/*
+ * trace_irqentry_callback - Callback to the irq:irq_entry traceevent
+ *
+ * Used to note the starting of an IRQ occurrence.
+ */
+static void trace_irqentry_callback(void *data, int irq,
+ struct irqaction *action)
+{
+ osnoise_trace_irq_entry(irq);
+}
+
+/*
+ * trace_irqexit_callback - Callback to the irq:irq_exit traceevent
+ *
+ * Used to note the end of an IRQ occurrence.
+ */
+static void trace_irqexit_callback(void *data, int irq,
+ struct irqaction *action, int ret)
+{
+ osnoise_trace_irq_exit(irq, action->name);
+}
+
+/*
+ * arch specific register function.
+ */
+int __weak osnoise_arch_register(void)
+{
+ return 0;
+}
+
+/*
+ * arch specific unregister function.
+ */
+void __weak osnoise_arch_unregister(void)
+{
+ return;
+}
+
+/*
+ * hook_irq_events - Hook IRQ handling events
+ *
+ * This function hooks the IRQ related callbacks to the respective trace
+ * events.
+ */
+static int hook_irq_events(void)
+{
+ int ret;
+
+ ret = register_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+ if (ret)
+ goto out_unregister_entry;
+
+ ret = osnoise_arch_register();
+ if (ret)
+ goto out_irq_exit;
+
+ return 0;
+
+out_irq_exit:
+ unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+out_unregister_entry:
+ unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+out_err:
+ return -EINVAL;
+}
+
+/*
+ * unhook_irq_events - Unhook IRQ handling events
+ *
+ * This function unhooks the IRQ related callbacks from the respective trace
+ * events.
+ */
+static void unhook_irq_events(void)
+{
+ osnoise_arch_unregister();
+ unregister_trace_irq_handler_exit(trace_irqexit_callback, NULL);
+ unregister_trace_irq_handler_entry(trace_irqentry_callback, NULL);
+}
+
+#ifndef CONFIG_PREEMPT_RT
+/*
+ * trace_softirq_entry_callback - Note the starting of a softirq
+ *
+ * Save the starting time of a softirq. As softirqs are non-preemptive to
+ * other softirqs, it is safe to use a single variable (osn_var->softirq)
+ * to save the statistics. The arrival_time is used to report... the
+ * arrival time. The delta_start is used to compute the duration at the
+ * softirq exit handler. See cond_move_softirq_delta_start().
+ */
+static void trace_softirq_entry_callback(void *data, unsigned int vec_nr)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+
+ if (!osn_var->sampling)
+ return;
+ /*
+ * This value will be used in the report, but not to compute
+ * the execution time, so it is safe to get it unsafe.
+ */
+ osn_var->softirq.arrival_time = time_get();
+ set_int_safe_time(osn_var, &osn_var->softirq.delta_start);
+ osn_var->softirq.count++;
+
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * trace_softirq_exit_callback - Note the end of a softirq
+ *
+ * Computes the duration of the softirq noise and traces it. It also discounts
+ * the interference from other sources of noise that could currently be
+ * accounted.
+ */
+static void trace_softirq_exit_callback(void *data, unsigned int vec_nr)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ int duration;
+
+ if (!osn_var->sampling)
+ return;
+
+#ifdef CONFIG_TIMERLAT_TRACER
+ /*
+ * If the timerlat tracer is enabled, but the timer IRQ handler has
+ * not yet run to set tracing_thread, do not trace.
+ */
+ if (unlikely(osnoise_data.timerlat_tracer)) {
+ struct timerlat_variables *tlat_var;
+ tlat_var = this_cpu_tmr_var();
+ if (!tlat_var->tracing_thread) {
+ osn_var->softirq.arrival_time = 0;
+ osn_var->softirq.delta_start = 0;
+ return;
+ }
+ }
+#endif
+
+ duration = get_int_safe_duration(osn_var, &osn_var->softirq.delta_start);
+ trace_softirq_noise(vec_nr, osn_var->softirq.arrival_time, duration);
+ cond_move_thread_delta_start(osn_var, duration);
+ osn_var->softirq.arrival_time = 0;
+}
+
+/*
+ * hook_softirq_events - Hook softirq handling events
+ *
+ * This function hooks the softirq related callbacks to the respective trace
+ * events.
+ */
+static int hook_softirq_events(void)
+{
+ int ret;
+
+ ret = register_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+ if (ret)
+ goto out_err;
+
+ ret = register_trace_softirq_exit(trace_softirq_exit_callback, NULL);
+ if (ret)
+ goto out_unreg_entry;
+
+ return 0;
+
+out_unreg_entry:
+ unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+out_err:
+ return -EINVAL;
+}
+
+/*
+ * unhook_softirq_events - Unhook softirq handling events
+ *
+ * This function unhooks the softirq related callbacks from the respective
+ * trace events.
+ */
+static void unhook_softirq_events(void)
+{
+ unregister_trace_softirq_entry(trace_softirq_entry_callback, NULL);
+ unregister_trace_softirq_exit(trace_softirq_exit_callback, NULL);
+}
+#else /* CONFIG_PREEMPT_RT */
+/*
+ * Softirqs run as threads in the PREEMPT_RT mode.
+ */
+static int hook_softirq_events(void)
+{
+ return 0;
+}
+static void unhook_softirq_events(void)
+{
+}
+#endif
+
+/*
+ * thread_entry - Record the starting of a thread noise window
+ *
+ * It saves the context switch time for a noisy thread, and increments
+ * the interference counters.
+ */
+static void
+thread_entry(struct osnoise_variables *osn_var, struct task_struct *t)
+{
+ if (!osn_var->sampling)
+ return;
+ /*
+ * The arrival time will be used in the report, but not to compute
+ * the execution time, so it is safe to get it unsafe.
+ */
+ osn_var->thread.arrival_time = time_get();
+
+ set_int_safe_time(osn_var, &osn_var->thread.delta_start);
+
+ osn_var->thread.count++;
+ local_inc(&osn_var->int_counter);
+}
+
+/*
+ * thread_exit - Report the end of a thread noise window
+ *
+ * It computes the total noise from a thread, tracing if needed.
+ */
+static void
+thread_exit(struct osnoise_variables *osn_var, struct task_struct *t)
+{
+ int duration;
+
+ if (!osn_var->sampling)
+ return;
+
+#ifdef CONFIG_TIMERLAT_TRACER
+ if (osnoise_data.timerlat_tracer) {
+ struct timerlat_variables *tlat_var;
+ tlat_var = this_cpu_tmr_var();
+ if (!tlat_var->tracing_thread) {
+ osn_var->thread.delta_start = 0;
+ osn_var->thread.arrival_time = 0;
+ return;
+ }
+ }
+#endif
+
+ duration = get_int_safe_duration(osn_var, &osn_var->thread.delta_start);
+
+ trace_thread_noise(t, osn_var->thread.arrival_time, duration);
+
+ osn_var->thread.arrival_time = 0;
+}
+
+/*
+ * trace_sched_switch_callback - sched:sched_switch trace event handler
+ *
+ * This function is hooked to the sched:sched_switch trace event, and it is
+ * used to record the beginning and to report the end of a thread noise window.
+ */
+static void
+trace_sched_switch_callback(void *data, bool preempt, struct task_struct *p,
+ struct task_struct *n)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+
+ if (p->pid != osn_var->pid)
+ thread_exit(osn_var, p);
+
+ if (n->pid != osn_var->pid)
+ thread_entry(osn_var, n);
+}
+
+/*
+ * hook_thread_events - Hook the instrumentation for thread noise
+ *
+ * Hook the osnoise tracer callbacks to handle the noise from other
+ * threads on the necessary kernel events.
+ */
+static int hook_thread_events(void)
+{
+ int ret;
+
+ ret = register_trace_sched_switch(trace_sched_switch_callback, NULL);
+ if (ret)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * unhook_thread_events - Unhook the instrumentation for thread noise
+ *
+ * Unhook the osnoise tracer callbacks that handle the noise from other
+ * threads on the necessary kernel events.
+ */
+static void unhook_thread_events(void)
+{
+ unregister_trace_sched_switch(trace_sched_switch_callback, NULL);
+}
+
+/*
+ * save_osn_sample_stats - Save the osnoise_sample statistics
+ *
+ * Save the osnoise_sample statistics before the sampling phase. These
+ * values will be used later to compute the diff between the statistics
+ * before and after the osnoise sampling.
+ */
+static void
+save_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
+{
+ s->nmi_count = osn_var->nmi.count;
+ s->irq_count = osn_var->irq.count;
+ s->softirq_count = osn_var->softirq.count;
+ s->thread_count = osn_var->thread.count;
+}
+
+/*
+ * diff_osn_sample_stats - Compute the osnoise_sample statistics
+ *
+ * After a sample period, compute the difference on the osnoise_sample
+ * statistics. The struct osnoise_sample *s contains the statistics saved via
+ * save_osn_sample_stats() before the osnoise sampling.
+ */
+static void
+diff_osn_sample_stats(struct osnoise_variables *osn_var, struct osnoise_sample *s)
+{
+ s->nmi_count = osn_var->nmi.count - s->nmi_count;
+ s->irq_count = osn_var->irq.count - s->irq_count;
+ s->softirq_count = osn_var->softirq.count - s->softirq_count;
+ s->thread_count = osn_var->thread.count - s->thread_count;
+}
+
+/*
+ * osnoise_stop_tracing - Stop tracing and the tracer.
+ */
+static void osnoise_stop_tracing(void)
+{
+ struct trace_array *tr = osnoise_trace;
+
+ tracer_tracing_off(tr);
+}
+
+/*
+ * run_osnoise - Sample the time and look for osnoise
+ *
+ * Used to capture the time, looking for potential osnoise latency repeatedly.
+ * Unlike the hwlat detector, it is called with preemption and interrupts
+ * enabled. This allows IRQs, softirqs and threads to run, interfering with
+ * the osnoise sampling thread, as they would with a regular thread.
+ */
+static int run_osnoise(void)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ struct trace_array *tr = osnoise_trace;
+ u64 start, sample, last_sample;
+ u64 last_int_count, int_count;
+ s64 noise = 0, max_noise = 0;
+ s64 total, last_total = 0;
+ struct osnoise_sample s;
+ unsigned int threshold;
+ u64 runtime, stop_in;
+ u64 sum_noise = 0;
+ int hw_count = 0;
+ int ret = -1;
+
+ /*
+ * Considers the current thread as the workload.
+ */
+ osn_var->pid = current->pid;
+
+ /*
+ * Save the current stats for the diff
+ */
+ save_osn_sample_stats(osn_var, &s);
+
+ /*
+ * if threshold is 0, use the default value of 5 us.
+ */
+ threshold = tracing_thresh ? : 5000;
+
+ /*
+ * Make sure NMIs see sampling first
+ */
+ osn_var->sampling = true;
+ barrier();
+
+ /*
+ * Transform the *_us config to nanoseconds to avoid the
+ * division on the main loop.
+ */
+ runtime = osnoise_data.sample_runtime * NSEC_PER_USEC;
+ stop_in = osnoise_data.stop_tracing * NSEC_PER_USEC;
+
+ /*
+ * Start timestamp
+ */
+ start = time_get();
+
+ /*
+ * "previous" loop.
+ */
+ last_int_count = set_int_safe_time(osn_var, &last_sample);
+
+ do {
+ /*
+ * Get sample!
+ */
+ int_count = set_int_safe_time(osn_var, &sample);
+
+ noise = time_sub(sample, last_sample);
+
+ /*
+ * This shouldn't happen.
+ */
+ if (noise < 0) {
+ osnoise_taint("negative noise!");
+ goto out;
+ }
+
+ /*
+ * Sample runtime.
+ */
+ total = time_sub(sample, start);
+
+ /*
+ * Check for possible overflows.
+ */
+ if (total < last_total) {
+ osnoise_taint("total overflow!");
+ break;
+ }
+
+ last_total = total;
+
+ if (noise >= threshold) {
+ int interference = int_count - last_int_count;
+
+ if (noise > max_noise)
+ max_noise = noise;
+
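+ /*
+ * No interference (the int_counter did not change while the
+ * noise was observed) means that the noise did not come from
+ * the OS level, so it is accounted as hardware noise.
+ */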
+ if (!interference)
+ hw_count++;
+
+ sum_noise += noise;
+
+ trace_sample_threshold(last_sample, noise, interference);
+
+ if (osnoise_data.stop_tracing)
+ if (noise > stop_in)
+ osnoise_stop_tracing();
+ }
+
+ /*
+ * For the non-preemptive kernel config: let threads run, if
+ * they so wish.
+ */
+ cond_resched();
+
+ last_sample = sample;
+ last_int_count = int_count;
+
+ } while (total < runtime && !kthread_should_stop());
+
+ /*
+ * Ensure the above is complete from the point of view of interrupts
+ * before disabling sampling.
+ */
+ barrier();
+
+ osn_var->sampling = false;
+
+ /*
+ * Make sure sampling data is no longer updated.
+ */
+ barrier();
+
+ /*
+ * Save noise info.
+ */
+ s.noise = time_to_us(sum_noise);
+ s.runtime = time_to_us(total);
+ s.max_sample = time_to_us(max_noise);
+ s.hw_count = hw_count;
+
+ /* Save interference stats info */
+ diff_osn_sample_stats(osn_var, &s);
+
+ trace_osnoise_sample(&s);
+
+ /* Keep a running maximum ever recorded osnoise "latency" */
+ if (max_noise > tr->max_latency) {
+ tr->max_latency = max_noise;
+ latency_fsnotify(tr);
+ }
+
+ if (osnoise_data.stop_tracing_total)
+ if (s.noise > osnoise_data.stop_tracing_total)
+ osnoise_stop_tracing();
+
+ return 0;
+out:
+ return ret;
+}
+
+static struct cpumask osnoise_cpumask;
+static struct cpumask save_cpumask;
+
+/*
+ * osnoise_main - The osnoise detection kernel thread
+ *
+ * Calls run_osnoise() to measure the osnoise for the configured runtime,
+ * once every period.
+ */
+static int osnoise_main(void *data)
+{
+ u64 interval;
+
+ while (!kthread_should_stop()) {
+
+ run_osnoise();
+
+ mutex_lock(&interface_lock);
+ interval = osnoise_data.sample_period - osnoise_data.sample_runtime;
+ mutex_unlock(&interface_lock);
+
+ do_div(interval, USEC_PER_MSEC);
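+ /*
+ * For example, period = 1000000 us and runtime = 600000 us leave an
+ * interval of 400000 us; after the division above, that is a 400 ms
+ * pause between samples.
+ */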
+
+ /*
+ * Unlike the hwlat detector, the osnoise tracer can run
+ * without a pause because preemption is on.
+ */
+ if (interval < 1) {
+ /* Let synchronize_rcu_tasks() make progress */
+ cond_resched_tasks_rcu_qs();
+ continue;
+ }
+
+ if (msleep_interruptible(interval))
+ break;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * timerlat_irq - hrtimer handler for timerlat.
+ */
+static enum hrtimer_restart timerlat_irq(struct hrtimer *timer)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ struct trace_array *tr = osnoise_trace;
+ struct timerlat_variables *tlat;
+ struct timerlat_sample s;
+ u64 now;
+ u64 diff;
+
+ /*
+ * The timer might not have been armed for this CPU, so get the
+ * timerlat struct from the timer itself, not from this CPU's
+ * per-cpu data.
+ */
+ tlat = container_of(timer, struct timerlat_variables, timer);
+
+ now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
+
+ /*
+ * Enable the osnoise events for thread and softirq.
+ */
+ tlat->tracing_thread = true;
+
+ osn_var->thread.arrival_time = time_get();
+
+ /*
+ * A hardirq is running: the timer IRQ. It is certainly preempting
+ * a thread, and potentially also preempting a softirq.
+ *
+ * At this point, what matters is not the duration of the preempted
+ * thread (and maybe softirq), but how much time they will delay
+ * the beginning of the execution of the timer thread.
+ *
+ * To get the correct (net) delay added by the softirq, its delta_start
+ * is set to the IRQ's one. In this way, at the return of the IRQ, the
+ * delta_start of the softirq will be zeroed, accounting then only the
+ * time after that.
+ *
+ * The thread follows the same principle. However, if a softirq is
+ * running, the thread needs to receive the softirq delta_start. The
+ * reason is that the softirq will be the last to be unfolded,
+ * resetting the thread delay to zero.
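+ *
+ * For instance, in the simpler case where no softirq is pending: the
+ * timer IRQ preempts only a thread, so thread.delta_start receives the
+ * IRQ's delta_start. At the IRQ exit, it is pushed forward by the IRQ
+ * duration, and the thread noise reported at the context switch to the
+ * timerlat thread then covers only the time between the IRQ return and
+ * the switch, i.e., the extra delay added by the preempted thread.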
+ */
+#ifndef CONFIG_PREEMPT_RT
+ if (osn_var->softirq.delta_start) {
+ copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
+ &osn_var->softirq.delta_start);
+
+ copy_int_safe_time(osn_var, &osn_var->softirq.delta_start,
+ &osn_var->irq.delta_start);
+ } else {
+ copy_int_safe_time(osn_var, &osn_var->thread.delta_start,
+ &osn_var->irq.delta_start);
+ }
+#else /* CONFIG_PREEMPT_RT */
+ /*
+ * The softirqs run as threads on RT, so there is no need
+ * to keep track of them.
+ */
+ copy_int_safe_time(osn_var, &osn_var->thread.delta_start, &osn_var->irq.delta_start);
+#endif /* CONFIG_PREEMPT_RT */
+
+ /*
+ * Compare the current time with the expected time.
+ */
+ diff = now - tlat->abs_period;
+
+ tlat->count++;
+ s.seqnum = tlat->count;
+ s.timer_latency = diff;
+ s.context = IRQ_CONTEXT;
+
+ trace_timerlat_sample(&s);
+
+ /* Keep a running maximum of the ever recorded timer "latency" */
+ if (diff > tr->max_latency) {
+ tr->max_latency = diff;
+ latency_fsnotify(tr);
+ }
+
+ if (osnoise_data.stop_tracing)
+ if (time_to_us(diff) >= osnoise_data.stop_tracing)
+ osnoise_stop_tracing();
+
+ wake_up_process(tlat->kthread);
+
+ if (osnoise_data.print_stack)
+ timerlat_save_stack(0);
+
+ return HRTIMER_NORESTART;
+}
+
+/*
+ * wait_next_period - Wait for the next period for timerlat
+ */
+static int wait_next_period(struct timerlat_variables *tlat)
+{
+ ktime_t next_abs_period, now;
+ u64 rel_period = osnoise_data.timerlat_period * 1000;
+
+ now = hrtimer_cb_get_time(&tlat->timer);
+ next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
+
+ /*
+ * Save the next abs_period.
+ */
+ tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
+
+ /*
+ * If the new abs_period is in the past, skip the activation.
+ */
+ while (ktime_compare(now, next_abs_period) > 0) {
+ next_abs_period = ns_to_ktime(tlat->abs_period + rel_period);
+ tlat->abs_period = (u64) ktime_to_ns(next_abs_period);
+ }
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ hrtimer_start(&tlat->timer, next_abs_period, HRTIMER_MODE_ABS_PINNED_HARD);
+ schedule();
+ return 1;
+}
+
+/*
+ * timerlat_main - Timerlat main
+ */
+static int timerlat_main(void *data)
+{
+ struct osnoise_variables *osn_var = this_cpu_osn_var();
+ struct timerlat_variables *tlat = this_cpu_tmr_var();
+ struct timerlat_sample s;
+ struct sched_param sp;
+ u64 now, diff;
+
+ /*
+ * Make the thread RT; that is how cyclictest is usually used.
+ */
+ sp.sched_priority = DEFAULT_TIMERLAT_PRIO;
+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+
+ tlat->count = 0;
+ tlat->tracing_thread = false;
+
+ hrtimer_init(&tlat->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
+ tlat->timer.function = timerlat_irq;
+ tlat->kthread = current;
+ osn_var->pid = current->pid;
+ /*
+ * Annotate the arrival time.
+ */
+ tlat->abs_period = hrtimer_cb_get_time(&tlat->timer);
+
+ wait_next_period(tlat);
+
+ osn_var->sampling = 1;
+
+ while (!kthread_should_stop()) {
+ now = ktime_to_ns(hrtimer_cb_get_time(&tlat->timer));
+ diff = now - tlat->abs_period;
+
+ s.seqnum = tlat->count;
+ s.timer_latency = diff;
+ s.context = THREAD_CONTEXT;
+
+ trace_timerlat_sample(&s);
+
+#ifdef CONFIG_STACKTRACE
+ if (osnoise_data.print_stack)
+ if (osnoise_data.print_stack <= time_to_us(diff))
+ timerlat_dump_stack();
+#endif /* CONFIG_STACKTRACE */
+
+ tlat->tracing_thread = false;
+ if (osnoise_data.stop_tracing_total)
+ if (time_to_us(diff) >= osnoise_data.stop_tracing_total)
+ osnoise_stop_tracing();
+
+ wait_next_period(tlat);
+ }
+
+ hrtimer_cancel(&tlat->timer);
+ return 0;
+}
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+/*
+ * stop_kthread - stop a workload thread
+ */
+static void stop_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+
+ kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
+ if (kthread)
+ kthread_stop(kthread);
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+}
+
+/*
+ * stop_per_cpu_kthreads - Stop per-cpu threads
+ *
+ * Stop the osnoise sampling threads. Use this on unload and at system
+ * shutdown.
+ */
+static void stop_per_cpu_kthreads(void)
+{
+ int cpu;
+
+ get_online_cpus();
+
+ for_each_online_cpu(cpu)
+ stop_kthread(cpu);
+
+ put_online_cpus();
+}
+
+/*
+ * start_kthread - Start a workload thread
+ */
+static int start_kthread(unsigned int cpu)
+{
+ struct task_struct *kthread;
+ void *main = osnoise_main;
+ char comm[24];
+
+#ifdef CONFIG_TIMERLAT_TRACER
+ if (osnoise_data.timerlat_tracer) {
+ snprintf(comm, 24, "timerlat/%d", cpu);
+ main = timerlat_main;
+ } else {
+ snprintf(comm, 24, "osnoise/%d", cpu);
+ }
+#else
+ snprintf(comm, 24, "osnoise/%d", cpu);
+#endif
+ kthread = kthread_create_on_cpu(main, NULL, cpu, comm);
+
+ if (IS_ERR(kthread)) {
+ pr_err(BANNER "could not start sampling thread\n");
+ stop_per_cpu_kthreads();
+ return -ENOMEM;
+ }
+
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = kthread;
+ wake_up_process(kthread);
+
+ return 0;
+}
+
+/*
+ * start_per_cpu_kthreads - Kick off the per-cpu osnoise sampling kthreads
+ *
+ * This starts the kernel threads that will look for osnoise on the
+ * allowed cpus.
+ */
+static int start_per_cpu_kthreads(struct trace_array *tr)
+{
+ struct cpumask *current_mask = &save_cpumask;
+ int retval;
+ int cpu;
+
+ get_online_cpus();
+ /*
+ * Run only on CPUs in which trace and osnoise are allowed to run.
+ */
+ cpumask_and(current_mask, tr->tracing_cpumask, &osnoise_cpumask);
+ /*
+ * And the CPU is online.
+ */
+ cpumask_and(current_mask, cpu_online_mask, current_mask);
+
+ for_each_possible_cpu(cpu)
+ per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+
+ for_each_cpu(cpu, current_mask) {
+ retval = start_kthread(cpu);
+ if (retval) {
+ /* Release the CPU hotplug lock taken above before bailing out. */
+ put_online_cpus();
+ stop_per_cpu_kthreads();
+ return retval;
+ }
+ }
+
+ put_online_cpus();
+
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void osnoise_hotplug_workfn(struct work_struct *dummy)
+{
+ struct trace_array *tr = osnoise_trace;
+ unsigned int cpu = smp_processor_id();
+
+ mutex_lock(&trace_types_lock);
+
+ if (!osnoise_busy)
+ goto out_unlock_trace;
+
+ mutex_lock(&interface_lock);
+ get_online_cpus();
+
+ if (!cpumask_test_cpu(cpu, &osnoise_cpumask))
+ goto out_unlock;
+
+ if (!cpumask_test_cpu(cpu, tr->tracing_cpumask))
+ goto out_unlock;
+
+ start_kthread(cpu);
+
+out_unlock:
+ put_online_cpus();
+ mutex_unlock(&interface_lock);
+out_unlock_trace:
+ mutex_unlock(&trace_types_lock);
+}
+
+static DECLARE_WORK(osnoise_hotplug_work, osnoise_hotplug_workfn);
+
+/*
+ * osnoise_cpu_init - CPU hotplug online callback function
+ */
+static int osnoise_cpu_init(unsigned int cpu)
+{
+ schedule_work_on(cpu, &osnoise_hotplug_work);
+ return 0;
+}
+
+/*
+ * osnoise_cpu_die - CPU hotplug offline callback function
+ */
+static int osnoise_cpu_die(unsigned int cpu)
+{
+ stop_kthread(cpu);
+ return 0;
+}
+
+static void osnoise_init_hotplug_support(void)
+{
+ int ret;
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "trace/osnoise:online",
+ osnoise_cpu_init, osnoise_cpu_die);
+ if (ret < 0)
+ pr_warn(BANNER "Error initializing CPU hotplug support\n");
+}
+#else /* CONFIG_HOTPLUG_CPU */
+static void osnoise_init_hotplug_support(void)
+{
+ return;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * osnoise_cpus_read - Read function for reading the "cpus" file
+ * @filp: The active open file structure
+ * @ubuf: The userspace provided buffer to read value into
+ * @count: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * Prints the "cpus" output into the user-provided buffer.
+ */
+static ssize_t
+osnoise_cpus_read(struct file *filp, char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ char *mask_str;
+ int len;
+
+ mutex_lock(&interface_lock);
+
+ len = snprintf(NULL, 0, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask)) + 1;
+ mask_str = kmalloc(len, GFP_KERNEL);
+ if (!mask_str) {
+ count = -ENOMEM;
+ goto out_unlock;
+ }
+
+ len = snprintf(mask_str, len, "%*pbl\n", cpumask_pr_args(&osnoise_cpumask));
+ if (len >= count) {
+ count = -EINVAL;
+ goto out_free;
+ }
+
+ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
+
+out_free:
+ kfree(mask_str);
+out_unlock:
+ mutex_unlock(&interface_lock);
+
+ return count;
+}
+
+static void osnoise_tracer_start(struct trace_array *tr);
+static void osnoise_tracer_stop(struct trace_array *tr);
+
+/*
+ * osnoise_cpus_write - Write function for "cpus" entry
+ * @filp: The active open file structure
+ * @ubuf: The user buffer that contains the value to write
+ * @count: The maximum number of bytes to write to the "file"
+ * @ppos: The current position in @filp
+ *
+ * This function provides a write implementation for the "cpus"
+ * interface to the osnoise tracer. By default, it lists all CPUs,
+ * allowing osnoise threads to run on any online CPU of the system.
+ * Writing to this interface restricts the execution of osnoise to the
+ * set of CPUs written. Note that osnoise also respects the
+ * "tracing_cpumask": osnoise threads will run only on the set of CPUs
+ * allowed here AND on "tracing_cpumask". Why not have just
+ * "tracing_cpumask"? Because the user might be interested in tracing
+ * what is running on other CPUs. For instance, one might run osnoise
+ * on one HT CPU while observing what is running on the sibling HT
+ * CPU.
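+ *
+ * For example, assuming tracefs is mounted at /sys/kernel/tracing:
+ *
+ *   # echo 0-3 > /sys/kernel/tracing/osnoise/cpus
+ *
+ * restricts the osnoise/timerlat threads to CPUs 0-3, provided those
+ * CPUs are also set in "tracing_cpumask".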
+ */
+static ssize_t
+osnoise_cpus_write(struct file *filp, const char __user *ubuf, size_t count,
+ loff_t *ppos)
+{
+ struct trace_array *tr = osnoise_trace;
+ cpumask_var_t osnoise_cpumask_new;
+ int running, err;
+ char buf[256];
+
+ if (count >= 256)
+ return -EINVAL;
+
+ if (copy_from_user(buf, ubuf, count))
+ return -EFAULT;
+
+ if (!zalloc_cpumask_var(&osnoise_cpumask_new, GFP_KERNEL))
+ return -ENOMEM;
+
+ err = cpulist_parse(buf, osnoise_cpumask_new);
+ if (err)
+ goto err_free;
+
+ /*
+ * trace_types_lock is taken to avoid concurrency on start/stop
+ * and osnoise_busy.
+ */
+ mutex_lock(&trace_types_lock);
+ running = osnoise_busy;
+ if (running)
+ osnoise_tracer_stop(tr);
+
+ mutex_lock(&interface_lock);
+ /*
+ * osnoise_cpumask is read by CPU hotplug operations.
+ */
+ get_online_cpus();
+
+ cpumask_copy(&osnoise_cpumask, osnoise_cpumask_new);
+
+ put_online_cpus();
+ mutex_unlock(&interface_lock);
+
+ if (running)
+ osnoise_tracer_start(tr);
+ mutex_unlock(&trace_types_lock);
+
+ free_cpumask_var(osnoise_cpumask_new);
+ return count;
+
+err_free:
+ free_cpumask_var(osnoise_cpumask_new);
+
+ return err;
+}
+
+/*
+ * osnoise/runtime_us: cannot be greater than the period.
+ */
+static struct trace_min_max_param osnoise_runtime = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.sample_runtime,
+ .max = &osnoise_data.sample_period,
+ .min = NULL,
+};
+
+/*
+ * osnoise/period_us: cannot be smaller than the runtime.
+ */
+static struct trace_min_max_param osnoise_period = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.sample_period,
+ .max = NULL,
+ .min = &osnoise_data.sample_runtime,
+};
+
+/*
+ * osnoise/stop_tracing_us: no limit.
+ */
+static struct trace_min_max_param osnoise_stop_tracing_in = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.stop_tracing,
+ .max = NULL,
+ .min = NULL,
+};
+
+/*
+ * osnoise/stop_tracing_total_us: no limit.
+ */
+static struct trace_min_max_param osnoise_stop_tracing_total = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.stop_tracing_total,
+ .max = NULL,
+ .min = NULL,
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+/*
+ * osnoise/print_stack: print the stacktrace of the IRQ handler if the total
+ * latency is higher than val.
+ */
+static struct trace_min_max_param osnoise_print_stack = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.print_stack,
+ .max = NULL,
+ .min = NULL,
+};
+
+/*
+ * osnoise/timerlat_period: min 100 us, max 1 s
+ */
+u64 timerlat_min_period = 100;
+u64 timerlat_max_period = 1000000;
+static struct trace_min_max_param timerlat_period = {
+ .lock = &interface_lock,
+ .val = &osnoise_data.timerlat_period,
+ .max = &timerlat_max_period,
+ .min = &timerlat_min_period,
+};
+#endif
+
+static const struct file_operations cpus_fops = {
+ .open = tracing_open_generic,
+ .read = osnoise_cpus_read,
+ .write = osnoise_cpus_write,
+ .llseek = generic_file_llseek,
+};
+
+/*
+ * init_tracefs - A function to initialize the tracefs interface files
+ *
+ * This function creates entries in tracefs for "osnoise" and "timerlat".
+ * It creates the "osnoise" directory in the tracing directory, and within
+ * that directory the user can change and view the configs.
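+ *
+ * For example, assuming tracefs is mounted at /sys/kernel/tracing and
+ * that the values respect the min/max constraints defined above, a
+ * possible configuration is:
+ *
+ *   # echo 1000000 > /sys/kernel/tracing/osnoise/period_us
+ *   # echo 500000 > /sys/kernel/tracing/osnoise/runtime_us
+ *   # echo osnoise > /sys/kernel/tracing/current_tracer
+ *
+ * which samples for 500 ms out of every 1 s period.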
+ */
+static int init_tracefs(void)
+{
+ struct dentry *top_dir;
+ struct dentry *tmp;
+ int ret;
+
+ ret = tracing_init_dentry();
+ if (ret)
+ return -ENOMEM;
+
+ top_dir = tracefs_create_dir("osnoise", NULL);
+ if (!top_dir)
+ return 0;
+
+ tmp = tracefs_create_file("period_us", 0640, top_dir,
+ &osnoise_period, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("runtime_us", 0644, top_dir,
+ &osnoise_runtime, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("stop_tracing_us", 0640, top_dir,
+ &osnoise_stop_tracing_in, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = tracefs_create_file("stop_tracing_total_us", 0640, top_dir,
+ &osnoise_stop_tracing_total, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+
+ tmp = trace_create_file("cpus", 0644, top_dir, NULL, &cpus_fops);
+ if (!tmp)
+ goto err;
+#ifdef CONFIG_TIMERLAT_TRACER
+#ifdef CONFIG_STACKTRACE
+ tmp = tracefs_create_file("print_stack", 0640, top_dir,
+ &osnoise_print_stack, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+#endif
+
+ tmp = tracefs_create_file("timerlat_period_us", 0640, top_dir,
+ &timerlat_period, &trace_min_max_fops);
+ if (!tmp)
+ goto err;
+#endif
+
+ return 0;
+
+err:
+ tracefs_remove(top_dir);
+ return -ENOMEM;
+}
+
+static int osnoise_hook_events(void)
+{
+ int retval;
+
+ /*
+ * The trace is already hooked; we are re-enabling from
+ * a stop_tracing_*.
+ */
+ if (trace_osnoise_callback_enabled)
+ return 0;
+
+ retval = hook_irq_events();
+ if (retval)
+ return -EINVAL;
+
+ retval = hook_softirq_events();
+ if (retval)
+ goto out_unhook_irq;
+
+ retval = hook_thread_events();
+ /*
+ * All fine!
+ */
+ if (!retval)
+ return 0;
+
+ unhook_softirq_events();
+out_unhook_irq:
+ unhook_irq_events();
+ return -EINVAL;
+}
+
+static int __osnoise_tracer_start(struct trace_array *tr)
+{
+ int retval;
+
+ osn_var_reset_all();
+
+ retval = osnoise_hook_events();
+ if (retval)
+ return retval;
+ /*
+ * Make sure NMIs see the reset values.
+ */
+ barrier();
+ trace_osnoise_callback_enabled = true;
+
+ retval = start_per_cpu_kthreads(tr);
+ if (retval) {
+ trace_osnoise_callback_enabled = false;
+ barrier();
+ unhook_thread_events();
+ unhook_softirq_events();
+ unhook_irq_events();
+ return retval;
+ }
+
+ osnoise_busy = true;
+
+ return 0;
+}
+
+static void osnoise_tracer_start(struct trace_array *tr)
+{
+ int retval;
+
+ if (osnoise_busy)
+ return;
+
+ retval = __osnoise_tracer_start(tr);
+ if (retval)
+ pr_err(BANNER "Error starting osnoise tracer\n");
+}
+
+static void osnoise_tracer_stop(struct trace_array *tr)
+{
+ if (!osnoise_busy)
+ return;
+
+ trace_osnoise_callback_enabled = false;
+ barrier();
+
+ stop_per_cpu_kthreads();
+
+ unhook_irq_events();
+ unhook_softirq_events();
+ unhook_thread_events();
+
+ osnoise_busy = false;
+}
+
+static int osnoise_tracer_init(struct trace_array *tr)
+{
+ /* Only allow one instance to enable this */
+ if (osnoise_busy)
+ return -EBUSY;
+
+ osnoise_trace = tr;
+ tr->max_latency = 0;
+
+ osnoise_tracer_start(tr);
+
+ return 0;
+}
+
+static void osnoise_tracer_reset(struct trace_array *tr)
+{
+ osnoise_tracer_stop(tr);
+}
+
+static struct tracer osnoise_tracer __read_mostly = {
+ .name = "osnoise",
+ .init = osnoise_tracer_init,
+ .reset = osnoise_tracer_reset,
+ .start = osnoise_tracer_start,
+ .stop = osnoise_tracer_stop,
+ .print_header = print_osnoise_headers,
+ .allow_instances = true,
+};
+
+#ifdef CONFIG_TIMERLAT_TRACER
+static void timerlat_tracer_start(struct trace_array *tr)
+{
+ int retval;
+
+ if (osnoise_busy)
+ return;
+
+ osnoise_data.timerlat_tracer = 1;
+
+ retval = __osnoise_tracer_start(tr);
+ if (retval)
+ goto out_err;
+
+ return;
+out_err:
+ pr_err(BANNER "Error starting timerlat tracer\n");
+}
+
+static void timerlat_tracer_stop(struct trace_array *tr)
+{
+ int cpu;
+
+ if (!osnoise_busy)
+ return;
+
+ for_each_online_cpu(cpu)
+ per_cpu(per_cpu_osnoise_var, cpu).sampling = 0;
+
+ osnoise_tracer_stop(tr);
+
+ osnoise_data.timerlat_tracer = 0;
+}
+
+static int timerlat_tracer_init(struct trace_array *tr)
+{
+ /* Only allow one instance to enable this */
+ if (osnoise_busy)
+ return -EBUSY;
+
+ osnoise_trace = tr;
+
+ tr->max_latency = 0;
+
+ timerlat_tracer_start(tr);
+
+ return 0;
+}
+
+static void timerlat_tracer_reset(struct trace_array *tr)
+{
+ timerlat_tracer_stop(tr);
+}
+
+static struct tracer timerlat_tracer __read_mostly = {
+ .name = "timerlat",
+ .init = timerlat_tracer_init,
+ .reset = timerlat_tracer_reset,
+ .start = timerlat_tracer_start,
+ .stop = timerlat_tracer_stop,
+ .print_header = print_timerlat_headers,
+ .allow_instances = true,
+};
+#endif /* CONFIG_TIMERLAT_TRACER */
+
+__init static int init_osnoise_tracer(void)
+{
+ int ret;
+
+ mutex_init(&interface_lock);
+
+ cpumask_copy(&osnoise_cpumask, cpu_all_mask);
+
+ ret = register_tracer(&osnoise_tracer);
+ if (ret) {
+ pr_err(BANNER "Error registering osnoise!\n");
+ return ret;
+ }
+
+#ifdef CONFIG_TIMERLAT_TRACER
+ ret = register_tracer(&timerlat_tracer);
+ if (ret) {
+ pr_err(BANNER "Error registering timerlat\n");
+ return ret;
+ }
+#endif
+ osnoise_init_hotplug_support();
+
+ init_tracefs();
+
+ return 0;
+}
+late_initcall(init_osnoise_tracer);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index d0368a569bfa..a0bf446bb034 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -1202,7 +1202,6 @@ trace_hwlat_print(struct trace_iterator *iter, int flags,
return trace_handle_return(s);
}
-
static enum print_line_t
trace_hwlat_raw(struct trace_iterator *iter, int flags,
struct trace_event *event)
@@ -1232,6 +1231,122 @@ static struct trace_event trace_hwlat_event = {
.funcs = &trace_hwlat_funcs,
};
+/* TRACE_OSNOISE */
+static enum print_line_t
+trace_osnoise_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct osnoise_entry *field;
+ u64 ratio, ratio_dec;
+ u64 net_runtime;
+
+ trace_assign_type(field, entry);
+
+ /*
+ * Compute the available % of CPU time.
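+ *
+ * For example, with runtime = 1000000 and noise = 12345:
+ * net_runtime = 987655, ratio = 987655 * 10000000 / 1000000 = 9876550,
+ * then ratio = 98 and ratio_dec = 76550, printed as "98.76550".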
+ */
+ net_runtime = field->runtime - field->noise;
+ ratio = net_runtime * 10000000;
+ do_div(ratio, field->runtime);
+ ratio_dec = do_div(ratio, 100000);
+
+ trace_seq_printf(s, "%llu %10llu %3llu.%05llu %7llu",
+ field->runtime,
+ field->noise,
+ ratio, ratio_dec,
+ field->max_sample);
+
+ trace_seq_printf(s, " %6u", field->hw_count);
+ trace_seq_printf(s, " %6u", field->nmi_count);
+ trace_seq_printf(s, " %6u", field->irq_count);
+ trace_seq_printf(s, " %6u", field->softirq_count);
+ trace_seq_printf(s, " %6u", field->thread_count);
+
+ trace_seq_putc(s, '\n');
+
+ return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_osnoise_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct osnoise_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ trace_seq_printf(s, "%lld %llu %llu %u %u %u %u %u\n",
+ field->runtime,
+ field->noise,
+ field->max_sample,
+ field->hw_count,
+ field->nmi_count,
+ field->irq_count,
+ field->softirq_count,
+ field->thread_count);
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_osnoise_funcs = {
+ .trace = trace_osnoise_print,
+ .raw = trace_osnoise_raw,
+};
+
+static struct trace_event trace_osnoise_event = {
+ .type = TRACE_OSNOISE,
+ .funcs = &trace_osnoise_funcs,
+};
+
+/* TRACE_TIMERLAT */
+static enum print_line_t
+trace_timerlat_print(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct trace_entry *entry = iter->ent;
+ struct trace_seq *s = &iter->seq;
+ struct timerlat_entry *field;
+
+ trace_assign_type(field, entry);
+
+ trace_seq_printf(s, "#%-5u context %6s timer_latency %9llu ns\n",
+ field->seqnum,
+ field->context ? "thread" : "irq",
+ field->timer_latency);
+
+ return trace_handle_return(s);
+}
+
+static enum print_line_t
+trace_timerlat_raw(struct trace_iterator *iter, int flags,
+ struct trace_event *event)
+{
+ struct timerlat_entry *field;
+ struct trace_seq *s = &iter->seq;
+
+ trace_assign_type(field, iter->ent);
+
+ trace_seq_printf(s, "%u %d %llu\n",
+ field->seqnum,
+ field->context,
+ field->timer_latency);
+
+ return trace_handle_return(s);
+}
+
+static struct trace_event_functions trace_timerlat_funcs = {
+ .trace = trace_timerlat_print,
+ .raw = trace_timerlat_raw,
+};
+
+static struct trace_event trace_timerlat_event = {
+ .type = TRACE_TIMERLAT,
+ .funcs = &trace_timerlat_funcs,
+};
+
/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
@@ -1442,6 +1557,8 @@ static struct trace_event *events[] __initdata = {
&trace_bprint_event,
&trace_print_event,
&trace_hwlat_event,
+ &trace_osnoise_event,
+ &trace_timerlat_event,
&trace_raw_data_event,
&trace_func_repeats_event,
NULL
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e5778d1d7a5b..2402de520eca 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -26,9 +26,9 @@ static struct task_struct *wakeup_task;
static int wakeup_cpu;
static int wakeup_current_cpu;
static unsigned wakeup_prio = -1;
-static int wakeup_rt;
-static int wakeup_dl;
-static int tracing_dl = 0;
+static bool wakeup_rt;
+static bool wakeup_dl;
+static bool tracing_dl;
static arch_spinlock_t wakeup_lock =
(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
@@ -498,7 +498,7 @@ static void __wakeup_reset(struct trace_array *tr)
{
wakeup_cpu = -1;
wakeup_prio = -1;
- tracing_dl = 0;
+ tracing_dl = false;
if (wakeup_task)
put_task_struct(wakeup_task);
@@ -572,9 +572,9 @@ probe_wakeup(void *ignore, struct task_struct *p)
* another task until the first one wakes up.
*/
if (dl_task(p))
- tracing_dl = 1;
+ tracing_dl = true;
else
- tracing_dl = 0;
+ tracing_dl = false;
wakeup_task = get_task_struct(p);
@@ -685,8 +685,8 @@ static int wakeup_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 0;
- wakeup_rt = 0;
+ wakeup_dl = false;
+ wakeup_rt = false;
return __wakeup_tracer_init(tr);
}
@@ -695,8 +695,8 @@ static int wakeup_rt_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 0;
- wakeup_rt = 1;
+ wakeup_dl = false;
+ wakeup_rt = true;
return __wakeup_tracer_init(tr);
}
@@ -705,8 +705,8 @@ static int wakeup_dl_tracer_init(struct trace_array *tr)
if (wakeup_busy)
return -EBUSY;
- wakeup_dl = 1;
- wakeup_rt = 0;
+ wakeup_dl = true;
+ wakeup_rt = false;
return __wakeup_tracer_init(tr);
}
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 9f478d29b926..976bf8ce8039 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -273,7 +273,8 @@ static void tracepoint_update_call(struct tracepoint *tp, struct tracepoint_func
* Add the probe function to a tracepoint.
*/
static int tracepoint_add_func(struct tracepoint *tp,
- struct tracepoint_func *func, int prio)
+ struct tracepoint_func *func, int prio,
+ bool warn)
{
struct tracepoint_func *old, *tp_funcs;
int ret;
@@ -288,7 +289,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
lockdep_is_held(&tracepoints_mutex));
old = func_add(&tp_funcs, func, prio);
if (IS_ERR(old)) {
- WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
+ WARN_ON_ONCE(warn && PTR_ERR(old) != -ENOMEM);
return PTR_ERR(old);
}
@@ -344,6 +345,32 @@ static int tracepoint_remove_func(struct tracepoint *tp,
}
/**
+ * tracepoint_probe_register_prio_may_exist - Connect a probe to a tracepoint with priority
+ * @tp: tracepoint
+ * @probe: probe handler
+ * @data: tracepoint data
+ * @prio: priority of this function over other registered functions
+ *
+ * Same as tracepoint_probe_register_prio() except that it will not warn
+ * if the tracepoint is already registered.
+ */
+int tracepoint_probe_register_prio_may_exist(struct tracepoint *tp, void *probe,
+ void *data, int prio)
+{
+ struct tracepoint_func tp_func;
+ int ret;
+
+ mutex_lock(&tracepoints_mutex);
+ tp_func.func = probe;
+ tp_func.data = data;
+ tp_func.prio = prio;
+ ret = tracepoint_add_func(tp, &tp_func, prio, false);
+ mutex_unlock(&tracepoints_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(tracepoint_probe_register_prio_may_exist);
+
+/**
* tracepoint_probe_register_prio - Connect a probe to a tracepoint with priority
* @tp: tracepoint
* @probe: probe handler
@@ -366,7 +393,7 @@ int tracepoint_probe_register_prio(struct tracepoint *tp, void *probe,
tp_func.func = probe;
tp_func.data = data;
tp_func.prio = prio;
- ret = tracepoint_add_func(tp, &tp_func, prio);
+ ret = tracepoint_add_func(tp, &tp_func, prio, true);
mutex_unlock(&tracepoints_mutex);
return ret;
}