author    Steven Rostedt <srostedt@redhat.com>  2009-06-02 18:26:07 +0200
committer Steven Rostedt <rostedt@goodmis.org>  2009-06-02 20:41:50 +0200
commit    82310a3272d5a2a7652f5649ad8a55f58c8f74d9 (patch)
tree      c909c7a3f1fd3430f64be1d4aa8aff84fdf45b04 /kernel/trace
parent    function-graph: only allocate init tasks if it was not already done (diff)
function-graph: enable the stack after initialization of other variables
The function graph tracer checks whether the task_struct has ret_stack defined to know if it is OK to use it. The initialization is done for all tasks by one process, but the idle tasks use the same initialization used by new tasks. If an interrupt happens on an idle task that just had the ret_stack created, but before the rest of the initialization took place, then we can corrupt the return address of the functions.

This patch moves the setting of the task_struct's ret_stack to after the other variables have been initialized.

[ Impact: prevent kernel panic on idle task when starting function graph ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
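For illustration, here is a minimal userspace sketch of the publish/consume pattern the patch uses, with C11 fences standing in for the kernel's smp_wmb()/smp_rmb(). The struct and function names are invented for the example, not the kernel's:

#include <stdatomic.h>
#include <stdlib.h>

struct ret_entry {
	unsigned long ret;
	unsigned long func;
};

struct task {
	/* stays NULL until every other field is initialized */
	_Atomic(struct ret_entry *) ret_stack;
	int curr_ret_stack;
	int depth;
};

/* Writer: fill in all fields first, publish the pointer last. */
static void task_stack_init(struct task *t, int depth)
{
	struct ret_entry *stack = malloc(depth * sizeof(*stack));

	if (!stack)
		return;
	t->curr_ret_stack = -1;
	t->depth = depth;
	/* order the field stores before the pointer store (the smp_wmb) */
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&t->ret_stack, stack, memory_order_relaxed);
}

/* Reader (the interrupt side): test the pointer before anything else. */
static int task_stack_push(struct task *t, unsigned long ret,
			   unsigned long func)
{
	struct ret_entry *stack;

	stack = atomic_load_explicit(&t->ret_stack, memory_order_relaxed);
	if (!stack)
		return -1;
	/* order the pointer load before the field loads (the smp_rmb) */
	atomic_thread_fence(memory_order_acquire);
	if (t->curr_ret_stack == t->depth - 1)
		return -1;	/* stack full */
	stack[++t->curr_ret_stack] = (struct ret_entry){ ret, func };
	return 0;
}

A reader that loads a non-NULL stack pointer is thus guaranteed to also observe curr_ret_stack == -1 and the other fields set by the writer, which is exactly the property the buggy ordering lacked.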
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/ftrace.c                 | 9 +++++++--
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 6 ++++++
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ebff62ef40be..20e066065eb3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2739,15 +2739,20 @@ void unregister_ftrace_graph(void)
 void ftrace_graph_init_task(struct task_struct *t)
 {
 	if (atomic_read(&ftrace_graph_active)) {
-		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
-		if (!t->ret_stack)
+		if (!ret_stack)
 			return;
 		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;
+		/* make curr_ret_stack visible before we add the ret_stack */
+		smp_wmb();
+		t->ret_stack = ret_stack;
 	} else
 		t->ret_stack = NULL;
 }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e7b3a7..baeb5fe36108 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -65,6 +65,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	if (!current->ret_stack)
 		return -EBUSY;
 
+	/*
+	 * We must make sure the ret_stack is tested before we read
+	 * anything else.
+	 */
+	smp_rmb();
+
 	/* The return trace stack is full */
 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 		atomic_inc(&current->trace_overrun);
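The two barriers pair up across the files: the smp_wmb() in ftrace_graph_init_task() guarantees that curr_ret_stack and the other fields reach memory before the ret_stack pointer becomes non-NULL, and the smp_rmb() in ftrace_push_return_trace() guarantees that the NULL test on ret_stack completes before curr_ret_stack is read. An interrupt that sees the pointer therefore also sees the fully initialized state.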