author     Tejun Heo <tj@kernel.org>  2016-02-09 23:59:38 +0100
committer  Tejun Heo <tj@kernel.org>  2016-02-09 23:59:38 +0100
commit     f303fccb82928790ec58eea82722bd5c54d300b3
tree       20030c728af441a4efa01c2612be4883bd431115  /kernel/workqueue.c
parent     workqueue: schedule WORK_CPU_UNBOUND work on wq_unbound_cpumask CPUs
workqueue: implement "workqueue.debug_force_rr_cpu" debug feature
Workqueue used to guarantee local execution for work items queued
without an explicit target CPU.  The guarantee is gone now, which can
break some usages in subtle ways.  To flush out those cases, this
patch implements a debug feature which forces round-robin CPU
selection for all such work items.
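As an illustration of the kind of dependence this is meant to uncover
(a hypothetical sketch, not code from the patch; my_stat, my_work_fn
and my_event are made-up names): a handler queued without an explicit
CPU which assumes it still runs on the queueing CPU.

    #include <linux/workqueue.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, my_stat);  /* hypothetical per-cpu counter */

    static void my_work_fn(struct work_struct *work)
    {
            /*
             * Subtly buggy assumption: that this handler still runs on
             * the CPU which queued the work.  With round-robin CPU
             * selection forced, the increment may land on another
             * CPU's counter.
             */
            this_cpu_inc(my_stat);
    }

    static DECLARE_WORK(my_work, my_work_fn);

    void my_event(void)
    {
            schedule_work(&my_work);        /* WORK_CPU_UNBOUND: no explicit target CPU */
    }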
The debug feature defaults to off and can be enabled with the
workqueue.debug_force_rr_cpu kernel parameter.  The default can be
flipped with the CONFIG_DEBUG_WQ_FORCE_RR_CPU debug config option.
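For example (assuming workqueue.c is built in, so module_param_named()
exposes the knob under the "workqueue." prefix, and the 0644 mode makes
it writable at runtime):

    # enable at boot via the kernel command line
    workqueue.debug_force_rr_cpu=1

    # flip the default at build time
    CONFIG_DEBUG_WQ_FORCE_RR_CPU=y

    # toggle at runtime through sysfs
    echo 1 > /sys/module/workqueue/parameters/debug_force_rr_cpu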
If you hit this commit during bisection, please refer to 041bd12e272c
("Revert "workqueue: make sure delayed work run in local cpu"") for
more information and ping me.
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 23 +++++++++++++++++++++--
1 file changed, 21 insertions(+), 2 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 054774605d2f..51d77e7c0989 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -307,6 +307,18 @@ static cpumask_var_t wq_unbound_cpumask;
 /* CPU where unbound work was last round robin scheduled from this CPU */
 static DEFINE_PER_CPU(int, wq_rr_cpu_last);
 
+/*
+ * Local execution of unbound work items is no longer guaranteed.  The
+ * following always forces round-robin CPU selection on unbound work items
+ * to uncover usages which depend on it.
+ */
+#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
+static bool wq_debug_force_rr_cpu = true;
+#else
+static bool wq_debug_force_rr_cpu = false;
+#endif
+module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
+
 /* the per-cpu worker pools */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
 				     cpu_worker_pools);
@@ -1309,10 +1321,17 @@ static bool is_chained_work(struct workqueue_struct *wq)
  */
 static int wq_select_unbound_cpu(int cpu)
 {
+	static bool printed_dbg_warning;
 	int new_cpu;
 
-	if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
-		return cpu;
+	if (likely(!wq_debug_force_rr_cpu)) {
+		if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+			return cpu;
+	} else if (!printed_dbg_warning) {
+		pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
+		printed_dbg_warning = true;
+	}
+
 	if (cpumask_empty(wq_unbound_cpumask))
 		return cpu;
 
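Two small design points in the hunk above: the likely(!wq_debug_force_rr_cpu)
test keeps the common, feature-off path as cheap as before, and the
function-local static printed_dbg_warning makes the pr_warn() fire only
once per boot rather than on every queueing.

For context, the round-robin walk that the forced path falls through to
is unchanged by this patch; it was added by the parent commit
("workqueue: schedule WORK_CPU_UNBOUND work on wq_unbound_cpumask CPUs").
To the best of my reading, the remainder of wq_select_unbound_cpu()
looks like this (reproduced as context, not part of this diff):

    	/* continuation of wq_select_unbound_cpu(), from the parent commit */
    	new_cpu = __this_cpu_read(wq_rr_cpu_last);
    	new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
    	if (unlikely(new_cpu >= nr_cpu_ids)) {
    		new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
    		if (unlikely(new_cpu >= nr_cpu_ids))
    			return cpu;	/* no eligible online CPU; stay local */
    	}
    	__this_cpu_write(wq_rr_cpu_last, new_cpu);
    
    	return new_cpu;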