From 02f4e01ce710fe20d2e5548d52bfdea52efd09d1 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)"
Date: Thu, 2 Jan 2020 19:04:57 -0500
Subject: tracing: Initialize val to zero in parse_entry of inject code

gcc produces a variable may be uninitialized warning for "val" in
parse_entry(). This is really a false positive, but the code is subtle
enough to just initialize val to zero and it's not a fast path to worry
about it.

Marked for stable to remove the warning in the stable trees as well.

Cc: stable@vger.kernel.org
Fixes: 6c3edaf9fd6a3 ("tracing: Introduce trace event injection")
Reported-by: kbuild test robot
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/trace/trace_events_inject.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_events_inject.c b/kernel/trace/trace_events_inject.c
index d45079ee62f8..22bcf7c51d1e 100644
--- a/kernel/trace/trace_events_inject.c
+++ b/kernel/trace/trace_events_inject.c
@@ -195,7 +195,7 @@ static int parse_entry(char *str, struct trace_event_call *call, void **pentry)
         unsigned long irq_flags;
         void *entry = NULL;
         int entry_size;
-        u64 val;
+        u64 val = 0;
         int len;
 
         entry = trace_alloc_entry(call, &entry_size);
--
cgit v1.2.3


From d2ccbccb5444e9141b33cf5399927737e9ff1c3d Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)"
Date: Thu, 2 Jan 2020 21:56:44 -0500
Subject: tracing: Define MCOUNT_INSN_SIZE when not defined without direct calls

In order to handle direct calls along side of function graph tracer, a
check is made to see if the address being traced by the function graph
tracer is a direct call or not. To get the address used by direct
callers, the return address is subtracted by MCOUNT_INSN_SIZE.

For some archs with certain configurations, MCOUNT_INSN_SIZE is
undefined here. But these should not be using direct calls anyway.
Just define MCOUNT_INSN_SIZE to zero in this case.

Link: https://lore.kernel.org/r/202001020219.zvE3vsty%lkp@intel.com

Reported-by: kbuild test robot
Fixes: ff205766dbbee ("ftrace: Fix function_graph tracer interaction with BPF trampoline")
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/trace/fgraph.c | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

(limited to 'kernel/trace')

diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index a2659735db73..1af321dec0f1 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -96,6 +96,20 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func,
         return 0;
 }
 
+/*
+ * Not all archs define MCOUNT_INSN_SIZE which is used to look for direct
+ * functions. But those archs currently don't support direct functions
+ * anyway, and ftrace_find_rec_direct() is just a stub for them.
+ * Define MCOUNT_INSN_SIZE to keep those archs compiling.
+ */
+#ifndef MCOUNT_INSN_SIZE
+/* Make sure this only works without direct calls */
+# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
+# endif
+# define MCOUNT_INSN_SIZE 0
+#endif
+
 int function_graph_enter(unsigned long ret, unsigned long func,
                          unsigned long frame_pointer, unsigned long *retp)
 {
--
cgit v1.2.3
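Both MCOUNT_INSN_SIZE patches in this series (the fgraph one above and the
stack tracer one below) rely on the same fallback-define pattern: supply a
harmless default, and let a compile-time #error reject any configuration that
actually needs the real instruction size. The following is a minimal
standalone sketch of that pattern; the macro names mirror the kernel's, but
the surrounding program is illustrative only and is not kernel code.

/*
 * Sketch of the fallback-define pattern: a default of 0 is only ever
 * accepted when direct calls are disabled, otherwise the build fails.
 */
#include <stdio.h>

#ifndef MCOUNT_INSN_SIZE
/* The fallback must only be used when direct calls are disabled. */
# ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
#  error MCOUNT_INSN_SIZE not defined with direct calls enabled
# endif
# define MCOUNT_INSN_SIZE 0
#endif

int main(void)
{
        unsigned long ret = 0x1000;     /* pretend return address */

        /*
         * With the fallback, "ret - MCOUNT_INSN_SIZE" degenerates to "ret",
         * which is harmless because such configurations never have direct
         * callers to look up in the first place.
         */
        printf("lookup key: %#lx\n", ret - MCOUNT_INSN_SIZE);
        return 0;
}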
From b8299d362d0837ae39e87e9019ebe6b736e0f035 Mon Sep 17 00:00:00 2001
From: "Steven Rostedt (VMware)"
Date: Thu, 2 Jan 2020 22:02:41 -0500
Subject: tracing: Have stack tracer compile when MCOUNT_INSN_SIZE is not defined

On some archs with some configurations, MCOUNT_INSN_SIZE is not defined,
and this makes the stack tracer fail to compile. Just define it to zero
in this case.

Link: https://lore.kernel.org/r/202001020219.zvE3vsty%lkp@intel.com

Cc: stable@vger.kernel.org
Fixes: 4df297129f622 ("tracing: Remove most or all of stack tracer stack size from stack_max_size")
Reported-by: kbuild test robot
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/trace/trace_stack.c | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 4df9a209f7ca..c557f42a9397 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -283,6 +283,11 @@ static void check_stack(unsigned long ip, unsigned long *stack)
         local_irq_restore(flags);
 }
 
+/* Some archs may not define MCOUNT_INSN_SIZE */
+#ifndef MCOUNT_INSN_SIZE
+# define MCOUNT_INSN_SIZE 0
+#endif
+
 static void
 stack_trace_call(unsigned long ip, unsigned long parent_ip,
                  struct ftrace_ops *op, struct pt_regs *pt_regs)
--
cgit v1.2.3


From e31f7939c1c27faa5d0e3f14519eaf7c89e8a69d Mon Sep 17 00:00:00 2001
From: Wen Yang
Date: Fri, 3 Jan 2020 11:02:48 +0800
Subject: ftrace: Avoid potential division by zero in function profiler

The ftrace_profile->counter is unsigned long and do_div truncates it to
32 bits, which means it can test non-zero and be truncated to zero for
division. Fix this issue by using div64_ul() instead.

Link: http://lkml.kernel.org/r/20200103030248.14516-1-wenyang@linux.alibaba.com

Cc: stable@vger.kernel.org
Fixes: e330b3bcd8319 ("tracing: Show sample std dev in function profiling")
Fixes: 34886c8bc590f ("tracing: add average time in function to function profiler")
Signed-off-by: Wen Yang
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/trace/ftrace.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ac99a3500076..9bf1f2cd515e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -526,8 +526,7 @@ static int function_stat_show(struct seq_file *m, void *v)
         }
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-        avg = rec->time;
-        do_div(avg, rec->counter);
+        avg = div64_ul(rec->time, rec->counter);
         if (tracing_thresh && (avg < tracing_thresh))
                 goto out;
 #endif
@@ -553,7 +552,8 @@ static int function_stat_show(struct seq_file *m, void *v)
                  * Divide only 1000 for ns^2 -> us^2 conversion.
                  * trace_print_graph_duration will divide 1000 again.
                  */
-                do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
+                stddev = div64_ul(stddev,
+                                  rec->counter * (rec->counter - 1) * 1000);
         }
 
         trace_seq_init(&s);
--
cgit v1.2.3
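The truncation fixed above is easy to reproduce outside the kernel: do_div()
accepts only a 32-bit divisor, so a 64-bit counter such as 2^32 is non-zero
yet truncates to zero, turning the average into a divide by zero. The sketch
below uses plain C arithmetic as a stand-in for the do_div()/div64_ul()
macros; it illustrates the failure mode and is not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace stand-in for do_div(): the divisor is treated as 32 bits,
 * which is exactly the lossy step the patch removes.
 */
static uint64_t div_trunc32(uint64_t dividend, uint64_t divisor)
{
        uint32_t d = (uint32_t)divisor;         /* truncation */

        return d ? dividend / d : (uint64_t)-1; /* guard only for the demo */
}

/* Userspace stand-in for div64_ul(): the full 64-bit divisor is used. */
static uint64_t div_full64(uint64_t dividend, uint64_t divisor)
{
        return dividend / divisor;
}

int main(void)
{
        uint64_t time = 123456789ULL;
        uint64_t counter = 1ULL << 32;  /* non-zero, but low 32 bits are 0 */

        printf("truncated divisor: %llu\n",
               (unsigned long long)div_trunc32(time, counter));
        printf("full divisor:      %llu\n",
               (unsigned long long)div_full64(time, counter));
        return 0;
}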
From 50f9ad607ea891a9308e67b81f774c71736d1098 Mon Sep 17 00:00:00 2001
From: Kaitao Cheng
Date: Tue, 31 Dec 2019 05:35:30 -0800
Subject: kernel/trace: Fix do not unregister tracepoints when register sched_migrate_task fail

In the function, if register_trace_sched_migrate_task() returns error,
sched_switch/sched_wakeup_new/sched_wakeup won't unregister. That is why
fail_deprobe_sched_switch was added.

Link: http://lkml.kernel.org/r/20191231133530.2794-1-pilgrimtao@gmail.com

Cc: stable@vger.kernel.org
Fixes: 478142c39c8c2 ("tracing: do not grab lock in wakeup latency function tracing")
Signed-off-by: Kaitao Cheng
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/trace/trace_sched_wakeup.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 5e43b9664eca..617e297f46dc 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -630,7 +630,7 @@ static void start_wakeup_tracer(struct trace_array *tr)
         if (ret) {
                 pr_info("wakeup trace: Couldn't activate tracepoint"
                         " probe to kernel_sched_migrate_task\n");
-                return;
+                goto fail_deprobe_sched_switch;
         }
 
         wakeup_reset(tr);
@@ -648,6 +648,8 @@ static void start_wakeup_tracer(struct trace_array *tr)
 
         printk(KERN_ERR "failed to start wakeup tracer\n");
         return;
+fail_deprobe_sched_switch:
+        unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
 fail_deprobe_wake_new:
         unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
 fail_deprobe:
--
cgit v1.2.3


From 72879ee0c53e2fc17f443f7b1adcc0d5130cd934 Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Sat, 21 Dec 2019 15:48:25 +0000
Subject: tracing: Fix indentation issue

There is a declaration that is indented one level too deeply, remove
the extraneous tab.

Link: http://lkml.kernel.org/r/20191221154825.33073-1-colin.king@canonical.com

Signed-off-by: Colin Ian King
Signed-off-by: Steven Rostedt (VMware)
---
 kernel/trace/trace_seq.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'kernel/trace')

diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c
index 344e4c1aa09c..87de6edafd14 100644
--- a/kernel/trace/trace_seq.c
+++ b/kernel/trace/trace_seq.c
@@ -381,7 +381,7 @@ int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
                        int prefix_type, int rowsize, int groupsize,
                        const void *buf, size_t len, bool ascii)
 {
-                unsigned int save_len = s->seq.len;
+        unsigned int save_len = s->seq.len;
 
         if (s->full)
                 return 0;
--
cgit v1.2.3
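For reference, the start_wakeup_tracer() fix above follows the usual kernel
goto-unwind convention: each successfully registered probe gets a matching
label on the failure path, and a later registration failure jumps to the
label that tears down everything registered before it. The sketch below is a
self-contained illustration of that ladder; the register/unregister functions
are hypothetical stand-ins, not the tracepoint API.

#include <stdio.h>

/* Stand-ins for the tracepoint probes; the last one is made to fail. */
static int register_wakeup(void)          { return 0; }
static int register_wakeup_new(void)      { return 0; }
static int register_sched_switch(void)    { return 0; }
static int register_migrate_task(void)    { return -1; }
static void unregister_wakeup(void)       { puts("unregister wakeup"); }
static void unregister_wakeup_new(void)   { puts("unregister wakeup_new"); }
static void unregister_sched_switch(void) { puts("unregister sched_switch"); }

static int start_tracer(void)
{
        int ret;

        ret = register_wakeup();
        if (ret)
                return ret;

        ret = register_wakeup_new();
        if (ret)
                goto fail_deprobe;

        ret = register_sched_switch();
        if (ret)
                goto fail_deprobe_wake_new;

        ret = register_migrate_task();
        if (ret)
                goto fail_deprobe_sched_switch; /* the hop the patch adds */

        return 0;

fail_deprobe_sched_switch:
        unregister_sched_switch();
fail_deprobe_wake_new:
        unregister_wakeup_new();
fail_deprobe:
        unregister_wakeup();
        return ret;
}

int main(void)
{
        return start_tracer() ? 1 : 0;
}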