author    | Oleg Nesterov <oleg@redhat.com>                | 2012-10-08 19:13:01 +0200
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-10-08 23:53:20 +0200
commit    | d5bbd43d5f450c3fca058f5b85f3dfb4e8cc88c9 (patch)
tree      | 9ee712ea3c2768dc9934e1e6003680793f303a00
parent    | Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/cmar... (diff)
exec: make de_thread() killable
Change de_thread() to use TASK_KILLABLE rather than TASK_UNINTERRUPTIBLE
while waiting for the other threads. The only complication is that we
should clear ->group_exit_task and ->notify_count before we return, and
we should do this under tasklist_lock. -EAGAIN is used to match the
initial signal_group_exit() check/return; the exact error code doesn't
really matter.
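
For context (this sketch is not part of the patch): the new -EAGAIN simply
propagates out through the exec path, and the caller only cares that the
value is nonzero. A paraphrased, heavily abridged sketch of the call site,
flush_old_exec() in fs/exec.c of this era:

	int flush_old_exec(struct linux_binprm *bprm)
	{
		int retval;

		/*
		 * Make sure we have a private signal table and that
		 * we are unassociated from the previous thread group.
		 * With this patch, de_thread() can also fail with
		 * -EAGAIN if the execing task is fatally signalled
		 * while waiting for the other threads.
		 */
		retval = de_thread(current);
		if (retval)
			return retval;	/* abridged; the real code branches to its cleanup label */

		/* ... rest of flush_old_exec() ... */
		return 0;
	}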
This fixes an (unlikely) race with coredump. de_thread() checks
signal_group_exit() before it starts to kill the subthreads, but this
can't help if another CLONE_VM (but non-CLONE_THREAD) task starts
coredumping after de_thread() unlocks ->siglock. In this case the
killed sub-thread can block in exit_mm() waiting for coredump_finish(),
the execing thread waits for that sub-thread, and the coredumping
thread waits for the execing thread. Deadlock.
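
To illustrate the task topology involved (this example is not from the
commit): a CLONE_VM-but-non-CLONE_THREAD task shares the execing process's
mm, so a core dump must synchronize with it, yet it is not one of the
subthreads that de_thread() kills and waits for. A minimal userspace
sketch that creates such a task:

	#define _GNU_SOURCE
	#include <sched.h>
	#include <signal.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static int vm_sharer(void *arg)
	{
		/* Shares the parent's mm, but is not in its thread group. */
		pause();
		return 0;
	}

	int main(void)
	{
		const size_t stack_size = 1024 * 1024;
		char *stack = malloc(stack_size);
		if (!stack)
			return 1;

		/*
		 * CLONE_VM without CLONE_THREAD: a separate process that
		 * shares our address space.  If it starts dumping core
		 * while we exec, both sides must wait for each other --
		 * the window the commit message describes.
		 */
		pid_t pid = clone(vm_sharer, stack + stack_size,
				  CLONE_VM | SIGCHLD, NULL);
		if (pid < 0) {
			perror("clone");
			return 1;
		}

		/* An execve() here would run de_thread() in the parent. */
		kill(pid, SIGKILL);
		waitpid(pid, NULL, 0);
		free(stack);
		return 0;
	}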
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | fs/exec.c | 16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/fs/exec.c b/fs/exec.c
index 9824473a7ec1..19f4fb80cd17 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -878,9 +878,11 @@ static int de_thread(struct task_struct *tsk)
 		sig->notify_count--;
 
 	while (sig->notify_count) {
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		__set_current_state(TASK_KILLABLE);
 		spin_unlock_irq(lock);
 		schedule();
+		if (unlikely(__fatal_signal_pending(tsk)))
+			goto killed;
 		spin_lock_irq(lock);
 	}
 	spin_unlock_irq(lock);
@@ -898,9 +900,11 @@ static int de_thread(struct task_struct *tsk)
 			write_lock_irq(&tasklist_lock);
 			if (likely(leader->exit_state))
 				break;
-			__set_current_state(TASK_UNINTERRUPTIBLE);
+			__set_current_state(TASK_KILLABLE);
 			write_unlock_irq(&tasklist_lock);
 			schedule();
+			if (unlikely(__fatal_signal_pending(tsk)))
+				goto killed;
 		}
 
 	/*
@@ -994,6 +998,14 @@ no_thread_group:
 
 	BUG_ON(!thread_group_leader(tsk));
 	return 0;
+
+killed:
+	/* protects against exit_notify() and __exit_signal() */
+	read_lock(&tasklist_lock);
+	sig->group_exit_task = NULL;
+	sig->notify_count = 0;
+	read_unlock(&tasklist_lock);
+	return -EAGAIN;
 }
 
 char *get_task_comm(char *buf, struct task_struct *tsk)
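
As an aside on the pattern: the two loops open-code the same idiom that the
wait_event_killable() helper encapsulates (sleep in TASK_KILLABLE, bail out
when a fatal signal is pending), presumably because de_thread() has to drop
and retake ->siglock or tasklist_lock around each sleep. A generic,
simplified sketch of the idiom, not the kernel's exact implementation:

	/*
	 * Simplified killable-wait idiom; "condition" stands for
	 * whatever event the caller is waiting on.
	 */
	for (;;) {
		__set_current_state(TASK_KILLABLE);
		if (condition)
			break;
		schedule();
		if (fatal_signal_pending(current))
			return -ERESTARTSYS;	/* caller unwinds partial state */
	}
	__set_current_state(TASK_RUNNING);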