Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  55
1 file changed, 30 insertions(+), 25 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 729ac6a44860..dd2a4c49a39a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -510,12 +510,11 @@ static int worker_pool_assign_id(struct worker_pool *pool)
lockdep_assert_held(&wq_pool_mutex);
- do {
- if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
- return -ENOMEM;
- ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
- } while (ret == -EAGAIN);
-
+ ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+ if (ret >= 0) {
+ pool->id = ret;
+ return 0;
+ }
return ret;
}
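
For context: idr_alloc() allocates an ID and binds it to the pointer in a
single call, returning the new ID on success or a negative errno on failure,
which is why the -EAGAIN retry loop above goes away. A minimal sketch of the
same pattern in isolation follows; example_assign_id() and its arguments are
hypothetical names used purely for illustration, not part of this patch:

	#include <linux/idr.h>
	#include <linux/gfp.h>

	/*
	 * Allocate the lowest free ID >= 0 (end == 0 means "no upper
	 * bound") and stash it in *out_id.  idr_alloc() returns the ID
	 * or a negative errno (-ENOMEM or -ENOSPC) -- never -EAGAIN,
	 * so no caller-side retry loop is needed.
	 */
	static int example_assign_id(struct idr *idr, void *ptr, int *out_id)
	{
		int ret = idr_alloc(idr, ptr, 0, 0, GFP_KERNEL);

		if (ret >= 0) {
			*out_id = ret;
			return 0;
		}
		return ret;
	}

The preload step also disappears: with GFP_KERNEL, idr_alloc() performs any
needed allocation internally instead of relying on idr_pre_get().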
@@ -4408,28 +4407,34 @@ static void wq_unbind_fn(struct work_struct *work)
spin_unlock_irq(&pool->lock);
mutex_unlock(&pool->manager_mutex);
- }
- /*
- * Call schedule() so that we cross rq->lock and thus can guarantee
- * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
- * as scheduler callbacks may be invoked from other cpus.
- */
- schedule();
+ /*
+ * Call schedule() so that we cross rq->lock and thus can
+ * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+ * This is necessary as scheduler callbacks may be invoked
+ * from other cpus.
+ */
+ schedule();
- /*
- * Sched callbacks are disabled now. Zap nr_running. After this,
- * nr_running stays zero and need_more_worker() and keep_working()
- * are always true as long as the worklist is not empty. Pools on
- * @cpu now behave as unbound (in terms of concurrency management)
- * pools which are served by workers tied to the CPU.
- *
- * On return from this function, the current worker would trigger
- * unbound chain execution of pending work items if other workers
- * didn't already.
- */
- for_each_cpu_worker_pool(pool, cpu)
+ /*
+ * Sched callbacks are disabled now. Zap nr_running.
+ * After this, nr_running stays zero and need_more_worker()
+ * and keep_working() are always true as long as the
+ * worklist is not empty. This pool now behaves as an
+ * unbound (in terms of concurrency management) pool which
+ * is served by workers tied to the pool.
+ */
atomic_set(&pool->nr_running, 0);
+
+ /*
+ * With concurrency management just turned off, a busy
+ * worker blocking could lead to lengthy stalls. Kick off
+ * unbound chain execution of currently pending work items.
+ */
+ spin_lock_irq(&pool->lock);
+ wake_up_worker(pool);
+ spin_unlock_irq(&pool->lock);
+ }
}
/**
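
Putting the pieces of the hunk together: schedule(), the nr_running reset,
and the wakeup now run once per pool inside the loop rather than once for the
whole CPU, which is what the moved closing brace accomplishes. The resulting
per-pool loop body looks roughly as follows (a condensed sketch of the code
above; the worker flag updates at the top of the loop are elided):

	for_each_cpu_worker_pool(pool, cpu) {
		/* ... workers marked WORKER_UNBOUND under pool->lock ... */

		/*
		 * schedule() crosses rq->lock so that scheduler callbacks,
		 * which may run on other CPUs, observe %WORKER_UNBOUND.
		 */
		schedule();

		/* Concurrency management is off; zap this pool's nr_running. */
		atomic_set(&pool->nr_running, 0);

		/*
		 * A busy worker blocking could now stall pending work, so
		 * wake a worker to kick off unbound chain execution.
		 */
		spin_lock_irq(&pool->lock);
		wake_up_worker(pool);
		spin_unlock_irq(&pool->lock);
	}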