author     Al Viro <viro@zeniv.linux.org.uk>  2012-06-27 09:31:24 +0200
committer  Al Viro <viro@zeniv.linux.org.uk>  2012-07-22 21:57:57 +0200
commit     ed3e694d78cc75fa79bf29698631b146fd27aa35 (patch)
tree       97dad6768546b489a3d09c42cef0738b57f4822e
parent     merge task_work and rcu_head, get rid of separate allocation for keyring case (diff)
move exit_task_work() past exit_files() et.al.
... and get rid of PF_EXITING check in task_work_add().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
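With the check gone, callers inherit the responsibility the removed PF_EXITING test used to carry: never queue work to a task that has already passed exit_task_work(). The pattern that stays trivially safe is queueing work for current, as in the illustrative fragment below. This is not part of the patch; my_cleanup and my_work are hypothetical names, used only to show the call sequence.

#include <linux/task_work.h>
#include <linux/sched.h>

/* Hypothetical callback: runs from task_work_run() on the way back
 * to user mode, or from exit_task_work() during exit. */
static void my_cleanup(struct callback_head *head)
{
	/* release per-task resources here */
}

static struct callback_head my_work;	/* hypothetical one-shot work item */

static void queue_my_cleanup(void)
{
	init_task_work(&my_work, my_cleanup);
	/*
	 * Safe without the removed PF_EXITING check: current is still
	 * running here, so it cannot have passed exit_task_work().
	 */
	task_work_add(current, &my_work, true);	/* true: notify on resume */
}

Queueing work into another task is still possible, but after this commit it is the caller, not task_work_add(), that must guarantee the target has not yet run exit_task_work().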
-rw-r--r--  kernel/exit.c        6
-rw-r--r--  kernel/task_work.c  30
2 files changed, 13 insertions(+), 23 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 2f59cc334516..d17f6c4ddfa9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -953,14 +953,11 @@ void do_exit(long code)
 	exit_signals(tsk);	/* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
-	 * an exiting task cleaning up the robust pi futexes, and in
-	 * task_work_add() to avoid the race with exit_task_work().
+	 * an exiting task cleaning up the robust pi futexes.
 	 */
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
-	exit_task_work(tsk);
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, task_pid_nr(current),
 				preempt_count());
@@ -995,6 +992,7 @@ void do_exit(long code)
 	exit_shm(tsk);
 	exit_files(tsk);
 	exit_fs(tsk);
+	exit_task_work(tsk);
 	check_stack_usage();
 	exit_thread();
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 76266fb665dc..fb396089f66a 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -5,34 +5,26 @@
 int
 task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
 {
+	struct callback_head *last, *first;
 	unsigned long flags;
-	int err = -ESRCH;
 
-#ifndef TIF_NOTIFY_RESUME
-	if (notify)
-		return -ENOTSUPP;
-#endif
 	/*
-	 * We must not insert the new work if the task has already passed
-	 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
-	 * and check PF_EXITING under pi_lock.
+	 * Not inserting the new work if the task has already passed
+	 * exit_task_work() is the responsibility of callers.
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	if (likely(!(task->flags & PF_EXITING))) {
-		struct callback_head *last = task->task_works;
-		struct callback_head *first = last ? last->next : twork;
-		twork->next = first;
-		if (last)
-			last->next = twork;
-		task->task_works = twork;
-		err = 0;
-	}
+	last = task->task_works;
+	first = last ? last->next : twork;
+	twork->next = first;
+	if (last)
+		last->next = twork;
+	task->task_works = twork;
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
-	if (likely(!err) && notify)
+	if (notify)
 		set_notify_resume(task);
-	return err;
+	return 0;
 }
 
 struct callback_head *
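For readers puzzling over the pointer dance above: task->task_works points at the most recently added callback, and that node's ->next wraps around to the oldest one, so the queue is a singly-linked ring with O(1) insertion and oldest-first traversal. Below is a minimal user-space model of that discipline; all names are illustrative re-implementations, and run_works() only approximates task_work_run() without the pi_lock locking.

#include <stdio.h>
#include <stddef.h>

/* Stand-in for the kernel's struct callback_head. */
struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *);
};

/* Stand-in for task->task_works: points at the newest entry;
 * newest->next wraps around to the oldest, closing the ring. */
static struct callback_head *task_works;

/* Mirrors the unlocked core of the new task_work_add(). */
static void add_work(struct callback_head *twork)
{
	struct callback_head *last = task_works;
	struct callback_head *first = last ? last->next : twork;

	twork->next = first;		/* new tail wraps to the oldest entry */
	if (last)
		last->next = twork;	/* old tail now points at the new one */
	task_works = twork;		/* head always tracks the newest entry */
}

/* Runs the queued works oldest-first, emptying the ring. */
static void run_works(void)
{
	struct callback_head *last = task_works;
	struct callback_head *work;

	if (!last)
		return;
	task_works = NULL;
	work = last->next;		/* start from the oldest entry */
	for (;;) {
		struct callback_head *next = work->next;
		int was_last = (work == last);

		work->func(work);
		if (was_last)
			break;
		work = next;
	}
}

static void hello(struct callback_head *head)
{
	printf("work %p ran\n", (void *)head);
}

int main(void)
{
	struct callback_head a = { .func = hello };
	struct callback_head b = { .func = hello };

	add_work(&a);
	add_work(&b);
	run_works();	/* prints a first, then b: FIFO order */
	return 0;
}

Keeping the single head pointer on the newest entry is what buys FIFO run order without a second pointer in task_struct: the oldest entry is always reachable as last->next.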