author     Davidlohr Bueso <dave@stgolabs.net>    2020-09-07 03:33:26 +0200
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>    2020-09-22 03:06:02 +0200
commit     40d14da383670db21a09e63d52db8dee9b77741e (patch)
tree       4913385409f94966a7932c13a3c09f21c0109f2d /kernel/trace/fgraph.c
parent     tracing: remove a pointless assignment (diff)
fgraph: Convert ret_stack tasklist scanning to rcu
It seems that alloc_retstack_tasklist() can also take a lockless approach for scanning the tasklist, instead of using the big global tasklist_lock. For this we also kill another deprecated and RCU-unsafe tsk->thread_group user, replacing it with for_each_process_thread() and maintaining semantics.

Here tasklist_lock does not protect anything other than the list against concurrent fork/exit. And considering that the whole thing is capped by FTRACE_RETSTACK_ALLOC_SIZE (32), it should not be a problem to have a potentially stale, yet stable, list. The task cannot go away either, so we don't risk racing with ftrace_graph_exit_task(), which clears the retstack.

The tsk->ret_stack management is not protected by tasklist_lock; it is serialized with the corresponding publish/subscribe barriers against concurrent ftrace_push_return_trace(). In addition, this plays nicer with cachelines by avoiding two atomic ops in the uncontended case.

Link: https://lkml.kernel.org/r/20200907013326.9870-1-dave@stgolabs.net
Acked-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
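For illustration, a minimal sketch of the lockless iteration pattern the patch adopts, assuming ordinary kernel context; the helper name walk_threads_rcu() is hypothetical and the per-task work is elided:

/*
 * Sketch only: an RCU read-side walk over all threads, in place of
 * the read_lock(&tasklist_lock) reader pattern. The list may be
 * slightly stale, but it stays stable for the duration of the walk.
 */
#include <linux/sched.h>
#include <linux/sched/signal.h>   /* for_each_process_thread() */
#include <linux/rcupdate.h>       /* rcu_read_lock()/rcu_read_unlock() */

static void walk_threads_rcu(void)   /* hypothetical helper */
{
	struct task_struct *g, *t;

	rcu_read_lock();
	for_each_process_thread(g, t) {
		/* per-task work goes here; must not sleep */
	}
	rcu_read_unlock();
}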
Diffstat (limited to 'kernel/trace/fgraph.c')
-rw-r--r--  kernel/trace/fgraph.c  8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/fgraph.c b/kernel/trace/fgraph.c
index 1af321dec0f1..5658f13037b3 100644
--- a/kernel/trace/fgraph.c
+++ b/kernel/trace/fgraph.c
@@ -387,8 +387,8 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
}
}
- read_lock(&tasklist_lock);
- do_each_thread(g, t) {
+ rcu_read_lock();
+ for_each_process_thread(g, t) {
if (start == end) {
ret = -EAGAIN;
goto unlock;
@@ -403,10 +403,10 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
smp_wmb();
t->ret_stack = ret_stack_list[start++];
}
- } while_each_thread(g, t);
+ }
unlock:
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
free:
for (i = start; i < end; i++)
kfree(ret_stack_list[i]);
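To close, a minimal sketch of the publish/subscribe barrier pairing the commit message refers to, with field names taken from the hunk above. The reader side is a simplified stand-in for the check in ftrace_push_return_trace(), not its actual body:

/*
 * Sketch only: smp_wmb()/smp_rmb() pairing around t->ret_stack.
 * Writer side mirrors the hunk above; reader side is a conceptual
 * stand-in for ftrace_push_return_trace().
 */
#include <linux/sched.h>
#include <linux/ftrace.h>   /* struct ftrace_ret_stack */

static void publish_ret_stack(struct task_struct *t,
			      struct ftrace_ret_stack *stack)
{
	t->curr_ret_stack = -1;
	/* Make the initialized index visible before the pointer. */
	smp_wmb();
	t->ret_stack = stack;   /* publish */
}

static int read_ret_stack_index(void)   /* hypothetical reader */
{
	if (!current->ret_stack)
		return -EBUSY;
	/* Pairs with the smp_wmb() in the writer above. */
	smp_rmb();
	return current->curr_ret_stack;
}

Because tasklist_lock is a global rwlock, even an uncontended reader pays two atomic operations on its cacheline; the RCU read-side section avoids both, which is the cacheline benefit the commit message mentions.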