author	Tejun Heo <tj@kernel.org>	2019-09-23 20:08:58 +0200
committer	Tejun Heo <tj@kernel.org>	2019-10-04 19:23:01 +0200
commit	c29eb85386880750130a01aabf288408a6614d65 (patch)
tree	a1611859442a41355f2182432afb2bec93921d01	/kernel/workqueue.c
parent	workqueue: Minor follow-ups to the rescuer destruction change (diff)
workqueue: more destroy_workqueue() fixes
destroy_workqueue() warnings still trigger spuriously, though at a lower frequency. The problem seems to be in-flight operations which haven't reached put_pwq() yet.

* Make the sanity check grab all the related locks so that it is synchronized against operations which put the pwq at the end.

* Always print out the offending pwq.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: "Williams, Gerald S" <gerald.s.williams@intel.com>
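For reference, a minimal sketch of the resulting check (names and lock order taken from the patch below; not a standalone build, since it relies on workqueue-internal symbols such as wq_pool_mutex, for_each_pwq() and the pwq_busy()/show_pwq() helpers introduced here):

	/* sketch of the new sanity-check flow in destroy_workqueue() */
	mutex_lock(&wq_pool_mutex);
	mutex_lock(&wq->mutex);
	for_each_pwq(pwq, wq) {
		spin_lock_irq(&pwq->pool->lock);
		if (WARN_ON(pwq_busy(pwq))) {
			/* report the offending pwq, drop all locks and bail out */
			show_pwq(pwq);
			spin_unlock_irq(&pwq->pool->lock);
			mutex_unlock(&wq->mutex);
			mutex_unlock(&wq_pool_mutex);
			show_workqueue_state();
			return;
		}
		spin_unlock_irq(&pwq->pool->lock);
	}
	mutex_unlock(&wq->mutex);
	mutex_unlock(&wq_pool_mutex);

The nesting follows the existing wq_pool_mutex -> wq->mutex -> pool->lock order; since put_pwq() runs under pool->lock, any in-flight put either completes before the check or leaves the pwq visibly busy.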
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	45
1 file changed, 31 insertions, 14 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7f7aa45dac37..4a3c30177b94 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -355,6 +355,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
+static void show_pwq(struct pool_workqueue *pwq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -4314,6 +4315,22 @@ err_destroy:
 }
 EXPORT_SYMBOL_GPL(alloc_workqueue);
 
+static bool pwq_busy(struct pool_workqueue *pwq)
+{
+	int i;
+
+	for (i = 0; i < WORK_NR_COLORS; i++)
+		if (pwq->nr_in_flight[i])
+			return true;
+
+	if ((pwq != pwq->wq->dfl_pwq) && (pwq->refcnt > 1))
+		return true;
+	if (pwq->nr_active || !list_empty(&pwq->delayed_works))
+		return true;
+
+	return false;
+}
+
 /**
  * destroy_workqueue - safely terminate a workqueue
  * @wq: target workqueue
@@ -4348,28 +4365,28 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		kfree(rescuer);
 	}
 
-	/* sanity checks */
+	/*
+	 * Sanity checks - grab all the locks so that we wait for all
+	 * in-flight operations which may do put_pwq().
+	 */
+	mutex_lock(&wq_pool_mutex);
 	mutex_lock(&wq->mutex);
 	for_each_pwq(pwq, wq) {
-		int i;
-
-		for (i = 0; i < WORK_NR_COLORS; i++) {
-			if (WARN_ON(pwq->nr_in_flight[i])) {
-				mutex_unlock(&wq->mutex);
-				show_workqueue_state();
-				return;
-			}
-		}
-
-		if (WARN_ON((pwq != wq->dfl_pwq) && (pwq->refcnt > 1)) ||
-		    WARN_ON(pwq->nr_active) ||
-		    WARN_ON(!list_empty(&pwq->delayed_works))) {
+		spin_lock_irq(&pwq->pool->lock);
+		if (WARN_ON(pwq_busy(pwq))) {
+			pr_warning("%s: %s has the following busy pwq (refcnt=%d)\n",
+				   __func__, wq->name, pwq->refcnt);
+			show_pwq(pwq);
+			spin_unlock_irq(&pwq->pool->lock);
 			mutex_unlock(&wq->mutex);
+			mutex_unlock(&wq_pool_mutex);
 			show_workqueue_state();
 			return;
 		}
+		spin_unlock_irq(&pwq->pool->lock);
 	}
 	mutex_unlock(&wq->mutex);
+	mutex_unlock(&wq_pool_mutex);
 
 	/*
 	 * wq list is used to freeze wq, remove from list after