author		Lai Jiangshan <laijs@cn.fujitsu.com>	2013-03-19 20:28:10 +0100
committer	Tejun Heo <tj@kernel.org>	2013-03-20 19:00:57 +0100
commit		881094532e2a27406a5f06f839087bd152a8a494 (patch)
tree		993b36c51374695056781f6bb01f258aea3e71cc /kernel/workqueue.c
parent		workqueue: kick a worker in pwq_adjust_max_active() (diff)
workqueue: use rcu_read_lock_sched() instead for accessing pwq in RCU
rcu_read_lock_sched() is better than preempt_disable() if the code is
protected by RCU_SCHED.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
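For context: rcu_read_lock_sched() has the same runtime effect as preempt_disable() (it disables preemption), but it additionally marks the region as an RCU-sched read-side critical section for sparse and lockdep. Below is a minimal sketch of the general reader/updater pattern this relies on; struct foo, global_foo, reader() and updater() are hypothetical names for illustration and are not taken from workqueue.c:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical example data, not from this patch. */
struct foo {
	int val;
};
static struct foo __rcu *global_foo;

/* Reader: an explicit RCU-sched read-side critical section. */
static int reader(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock_sched();		/* same runtime effect as preempt_disable() */
	p = rcu_dereference_sched(global_foo);
	if (p)
		val = p->val;
	rcu_read_unlock_sched();	/* same runtime effect as preempt_enable() */
	return val;
}

/* Updater: publishes a new object, then waits out all RCU-sched readers. */
static void updater(struct foo *newp)
{
	/* assumes the caller serializes updaters, hence the "1" condition */
	struct foo *oldp = rcu_dereference_protected(global_foo, 1);

	rcu_assign_pointer(global_foo, newp);
	synchronize_sched();		/* every RCU-sched reader has finished */
	kfree(oldp);
}

(synchronize_sched() is the era-appropriate grace-period primitive; later kernels folded it into synchronize_rcu().) Because freeing is deferred past the grace period, a reader that merely disables preemption is already safe; spelling it rcu_read_lock_sched() simply documents that dependency.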
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 79d1d347e690..b6c5a524d7c4 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3962,7 +3962,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 	struct pool_workqueue *pwq;
 	bool ret;
 
-	preempt_disable();
+	rcu_read_lock_sched();
 
 	if (!(wq->flags & WQ_UNBOUND))
 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
@@ -3970,7 +3970,7 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 		pwq = first_pwq(wq);
 
 	ret = !list_empty(&pwq->delayed_works);
-	preempt_enable();
+	rcu_read_unlock_sched();
 
 	return ret;
 }
@@ -4354,16 +4354,16 @@ bool freeze_workqueues_busy(void)
 		 * nr_active is monotonically decreasing. It's safe
 		 * to peek without lock.
 		 */
-		preempt_disable();
+		rcu_read_lock_sched();
 		for_each_pwq(pwq, wq) {
 			WARN_ON_ONCE(pwq->nr_active < 0);
 			if (pwq->nr_active) {
 				busy = true;
-				preempt_enable();
+				rcu_read_unlock_sched();
 				goto out_unlock;
 			}
 		}
-		preempt_enable();
+		rcu_read_unlock_sched();
 	}
 out_unlock:
 	mutex_unlock(&wq_mutex);
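For reference, a simplified paraphrase of how the two helpers were defined in include/linux/rcupdate.h around this time (the real versions carry extra debugging assertions, and details vary by kernel version):

static inline void rcu_read_lock_sched(void)
{
	preempt_disable();			/* the actual exclusion */
	__acquire(RCU_SCHED);			/* sparse annotation */
	rcu_lock_acquire(&rcu_sched_lock_map);	/* lockdep bookkeeping */
}

static inline void rcu_read_unlock_sched(void)
{
	rcu_lock_release(&rcu_sched_lock_map);	/* undo lockdep bookkeeping */
	__release(RCU_SCHED);
	preempt_enable();
}

The patch is therefore behaviorally a no-op; its value is making the RCU-sched dependency of these pwq walks visible to readers and to the static checkers.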