author		Tejun Heo <tj@kernel.org>	2010-06-29 10:07:14 +0200
committer	Tejun Heo <tj@kernel.org>	2010-06-29 10:07:14 +0200
commit		b71ab8c2025caef8db719aa41af0ed735dc543cd (patch)
tree		8cc2c6164acf5fe82e8d8d05924590cb80fe088d /kernel/workqueue.c
parent		workqueue: implement concurrency managed dynamic worker pool (diff)
download	linux-b71ab8c2025caef8db719aa41af0ed735dc543cd.tar.xz
		linux-b71ab8c2025caef8db719aa41af0ed735dc543cd.zip
workqueue: increase max_active of keventd and kill current_is_keventd()
Define WQ_MAX_ACTIVE and create keventd with max_active set to half of
it, which means that keventd can now process up to WQ_MAX_ACTIVE / 2 - 1
works concurrently.  Unless some combination can result in a dependency
loop longer than max_active, deadlock won't happen and thus it's
unnecessary to check current_is_keventd() before trying to schedule a
work.  Kill current_is_keventd().
(Lockdep annotations are broken. We need lock_map_acquire_read_norecurse())
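For reference, a short sketch of how the new limits fit together.  The
enum itself lives in include/linux/workqueue.h, which is outside this
diffstat (limited to kernel/workqueue.c), so the concrete value below is
an assumption, not a quote from the patch:

	/* sketch only -- WQ_MAX_ACTIVE's value is assumed, not quoted */
	enum {
		WQ_MAX_ACTIVE	= 512,			/* hard cap, enforced by wq_clamp_max_active() */
		WQ_DFL_ACTIVE	= WQ_MAX_ACTIVE / 2,	/* keventd's max_active */
	};

With this, keventd can run up to WQ_DFL_ACTIVE - 1 works concurrently,
so queueing a work from within another work cannot deadlock unless the
dependency chain itself is longer than max_active.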
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	63
1 file changed, 13 insertions(+), 50 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0ad46523b423..4190e84cf995 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2398,7 +2398,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
-	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
@@ -2407,23 +2406,12 @@ int schedule_on_each_cpu(work_func_t func)
 
 	get_online_cpus();
 
-	/*
-	 * When running in keventd don't schedule a work item on
-	 * itself.  Can just call directly because the work queue is
-	 * already bound.  This also is faster.
-	 */
-	if (current_is_keventd())
-		orig = raw_smp_processor_id();
-
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		if (cpu != orig)
-			schedule_work_on(cpu, work);
+		schedule_work_on(cpu, work);
 	}
-	if (orig >= 0)
-		func(per_cpu_ptr(works, orig));
 
 	for_each_online_cpu(cpu)
 		flush_work(per_cpu_ptr(works, cpu));
@@ -2494,41 +2482,6 @@ int keventd_up(void)
 	return keventd_wq != NULL;
 }
 
-int current_is_keventd(void)
-{
-	bool found = false;
-	unsigned int cpu;
-
-	/*
-	 * There no longer is one-to-one relation between worker and
-	 * work queue and a worker task might be unbound from its cpu
-	 * if the cpu was offlined.  Match all busy workers.  This
-	 * function will go away once dynamic pool is implemented.
-	 */
-	for_each_possible_cpu(cpu) {
-		struct global_cwq *gcwq = get_gcwq(cpu);
-		struct worker *worker;
-		struct hlist_node *pos;
-		unsigned long flags;
-		int i;
-
-		spin_lock_irqsave(&gcwq->lock, flags);
-
-		for_each_busy_worker(worker, i, pos, gcwq) {
-			if (worker->task == current) {
-				found = true;
-				break;
-			}
-		}
-
-		spin_unlock_irqrestore(&gcwq->lock, flags);
-		if (found)
-			break;
-	}
-
-	return found;
-}
-
 static struct cpu_workqueue_struct *alloc_cwqs(void)
 {
 	/*
@@ -2576,6 +2529,16 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs)
 #endif
 }
 
+static int wq_clamp_max_active(int max_active, const char *name)
+{
+	if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
+		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
+		       "is out of range, clamping between %d and %d\n",
+		       max_active, name, 1, WQ_MAX_ACTIVE);
+
+	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
+}
+
 struct workqueue_struct *__create_workqueue_key(const char *name,
 						unsigned int flags,
 						int max_active,
@@ -2585,7 +2548,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 	struct workqueue_struct *wq;
 	unsigned int cpu;
 
-	max_active = clamp_val(max_active, 1, INT_MAX);
+	max_active = wq_clamp_max_active(max_active, name);
 
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
 	if (!wq)
@@ -3324,6 +3287,6 @@ void __init init_workqueues(void)
 		spin_unlock_irq(&gcwq->lock);
 	}
 
-	keventd_wq = create_workqueue("events");
+	keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE);
 	BUG_ON(!keventd_wq);
 }
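The schedule_on_each_cpu() change is easiest to see from the caller's
side: a work item that itself runs on keventd may now queue per-cpu
works on every CPU, including its own, instead of special-casing the
local CPU via current_is_keventd().  A minimal sketch (hypothetical
module code, not part of this patch):

	#include <linux/kernel.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	static void per_cpu_fn(struct work_struct *work)
	{
		/* queued via schedule_work_on(), so bound to one CPU */
		pr_info("per_cpu_fn on cpu %d\n", raw_smp_processor_id());
	}

	static void top_fn(struct work_struct *work)
	{
		/*
		 * This work runs on keventd itself.  Before this commit,
		 * schedule_on_each_cpu() had to run func() directly for the
		 * local CPU to avoid waiting on itself; with max_active > 1
		 * it can simply queue and flush a work on every CPU.
		 */
		schedule_on_each_cpu(per_cpu_fn);
	}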