author	Tejun Heo <tj@kernel.org>	2012-07-17 21:39:26 +0200
committer	Tejun Heo <tj@kernel.org>	2012-07-17 21:39:26 +0200
commit	f2d5a0ee06c1813f985bb9386f3ccc0d0315720f (patch)
tree	4207975fe000f95931b0c6876657db5b13f92b73 /kernel
parent	workqueue: perform cpu down operations from low priority cpu_notifier() (diff)
workqueue: drop CPU_DYING notifier operation
Workqueue used CPU_DYING notification to mark GCWQ_DISASSOCIATED. This was necessary because workqueue's CPU_DOWN_PREPARE happened before other DOWN_PREPARE notifiers and workqueue needed to stay associated across the rest of DOWN_PREPARE.

After the previous patch, workqueue's DOWN_PREPARE happens after others and can set GCWQ_DISASSOCIATED directly. Drop CPU_DYING and let the trustee set GCWQ_DISASSOCIATED after disabling concurrency management.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: "Rafael J. Wysocki" <rjw@sisk.pl>
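The ordering argument above can be made concrete with a small sketch. The standalone C program below is only an illustration of priority-ordered notifier callbacks: the struct layout, priority values, callback names, and printed strings are invented here and are not the kernel's notifier API. The point it demonstrates is the one the message relies on — a low-priority callback sees CPU_DOWN_PREPARE after everyone else, so it is safe for it to set the DISASSOCIATED flag at that point.

#include <stdio.h>

#define CPU_DOWN_PREPARE 1

static int gcwq_disassociated;	/* stand-in for GCWQ_DISASSOCIATED */

static void other_down_prepare(unsigned long action)
{
	if (action == CPU_DOWN_PREPARE)
		printf("other subsystem: DOWN_PREPARE, workqueue still associated\n");
}

static void workqueue_down_prepare(unsigned long action)
{
	if (action == CPU_DOWN_PREPARE) {
		/* safe only because this callback runs after all others */
		gcwq_disassociated = 1;
		printf("workqueue: DOWN_PREPARE, set DISASSOCIATED\n");
	}
}

struct notifier {
	int priority;			/* higher priority runs first */
	void (*cb)(unsigned long action);
};

int main(void)
{
	/* chain kept sorted by descending priority, like the kernel's list */
	struct notifier chain[] = {
		{ 0, other_down_prepare },	 /* default priority */
		{ -10, workqueue_down_prepare }, /* low priority, runs last */
	};
	unsigned long i;

	for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		chain[i].cb(CPU_DOWN_PREPARE);
	return 0;
}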
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	29
1 file changed, 13 insertions, 16 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f59b7fd26e26..1405fb98c0b1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1250,11 +1250,11 @@ static void worker_leave_idle(struct worker *worker)
* verbatim as it's best effort and blocking and gcwq may be
* [dis]associated in the meantime.
*
- * This function tries set_cpus_allowed() and locks gcwq and verifies
- * the binding against GCWQ_DISASSOCIATED which is set during
- * CPU_DYING and cleared during CPU_ONLINE, so if the worker enters
- * idle state or fetches works without dropping lock, it can guarantee
- * the scheduling requirement described in the first paragraph.
+ * This function tries set_cpus_allowed() and locks gcwq and verifies the
+ * binding against %GCWQ_DISASSOCIATED which is set during
+ * %CPU_DOWN_PREPARE and cleared during %CPU_ONLINE, so if the worker
+ * enters idle state or fetches works without dropping lock, it can
+ * guarantee the scheduling requirement described in the first paragraph.
*
* CONTEXT:
* Might sleep. Called without any lock but returns with gcwq->lock
@@ -3349,6 +3349,12 @@ static int __cpuinit trustee_thread(void *__gcwq)
rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq));
BUG_ON(rc < 0);
+ /*
+ * We've claimed all manager positions. Make all workers unbound
+ * and set DISASSOCIATED. Before this, all workers except for the
+ * ones which are still executing works from before the last CPU
+ * down must be on the cpu. After this, they may become diasporas.
+ */
for_each_worker_pool(pool, gcwq) {
pool->flags |= POOL_MANAGING_WORKERS;
@@ -3359,6 +3365,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
for_each_busy_worker(worker, i, pos, gcwq)
worker->flags |= WORKER_ROGUE;
+ gcwq->flags |= GCWQ_DISASSOCIATED;
+
/*
* Call schedule() so that we cross rq->lock and thus can
* guarantee sched callbacks see the rogue flag. This is
@@ -3582,16 +3590,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
}
break;
- case CPU_DYING:
- /*
- * Before this, the trustee and all workers except for
- * the ones which are still executing works from
- * before the last CPU down must be on the cpu. After
- * this, they'll all be diasporas.
- */
- gcwq->flags |= GCWQ_DISASSOCIATED;
- break;
-
case CPU_POST_DEAD:
gcwq->trustee_state = TRUSTEE_BUTCHER;
/* fall through */
@@ -3672,7 +3670,6 @@ static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
{
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_DOWN_PREPARE:
- case CPU_DYING:
case CPU_POST_DEAD:
return workqueue_cpu_callback(nfb, action, hcpu);
}
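To summarize the new trustee_thread() ordering in the hunks above, here is a hedged, single-threaded sketch: claim the manager position, mark workers rogue while they are still on the CPU, and only then set DISASSOCIATED. The flag values, the three-element worker array, and the main() wrapper are invented for illustration; the real code runs per pool under gcwq->lock and across multiple threads.

#include <stdio.h>

#define WORKER_ROGUE		0x1
#define GCWQ_DISASSOCIATED	0x2
#define POOL_MANAGING_WORKERS	0x4

struct worker {
	unsigned int flags;
};

int main(void)
{
	struct worker workers[3] = { { 0 }, { 0 }, { 0 } };
	unsigned int gcwq_flags = 0;
	unsigned int pool_flags = 0;
	int i;

	/* 1. Claim the manager position so no workers are created or
	 *    destroyed while the trustee reorganizes the pool.
	 */
	pool_flags |= POOL_MANAGING_WORKERS;

	/* 2. Mark every worker ROGUE (unbound) while they are all still
	 *    guaranteed to be on the dying CPU.
	 */
	for (i = 0; i < 3; i++)
		workers[i].flags |= WORKER_ROGUE;

	/* 3. Only now set DISASSOCIATED; from this point workers may run
	 *    anywhere ("become diasporas"), which is why the flag cannot
	 *    be set any earlier.
	 */
	gcwq_flags |= GCWQ_DISASSOCIATED;

	printf("pool=%#x gcwq=%#x worker0=%#x\n",
	       pool_flags, gcwq_flags, workers[0].flags);
	return 0;
}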