author     Tejun Heo <tj@kernel.org>  2014-07-01 18:29:17 +0200
committer  Jens Axboe <axboe@fb.com>  2014-07-01 18:29:17 +0200
commit     776687bce42bb22cce48b5da950e48ebbb9a948f (patch)
tree       68901461bfd070246574f1e2440ba1ef2ae93ec0 /block/blk-core.c
parent     blk-mq: fix a memory ordering bug in blk_mq_queue_enter() (diff)
download   linux-776687bce42bb22cce48b5da950e48ebbb9a948f.tar.xz
           linux-776687bce42bb22cce48b5da950e48ebbb9a948f.zip
block, blk-mq: draining can't be skipped even if bypass_depth was non-zero
Currently, both blk_queue_bypass_start() and blk_mq_freeze_queue() skip queue draining if bypass_depth was already above zero. The assumption is that the one which bumped the bypass_depth should have performed draining already; however, there's nothing which prevents a new instance of bypassing/freezing from starting before the previous one finishes draining. The current code may allow the later bypassing/freezing instances to complete while there are still in-flight requests which haven't finished draining.

Fix it by draining regardless of bypass_depth. We still skip draining from blk_queue_bypass_start() while the queue is initializing, to avoid introducing excessive delays during boot. INIT_DONE setting is moved above the initial blk_queue_bypass_end() so that bypassing attempts can't slip in between.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: Jens Axboe <axboe@fb.com>
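The race the patch closes can be sketched outside the kernel. The fragment below is a minimal userspace model, not kernel code: struct queue, its inflight counter, and drain_requests() are hypothetical stand-ins for struct request_queue and __blk_drain_queue(). With the old skip-if-nonzero logic, a second bypasser can return while the first is still mid-drain; draining unconditionally, as the patch does, closes that window.

/*
 * Minimal userspace model of the draining race (assumed, illustrative
 * names; not kernel code).
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct queue {
	pthread_mutex_t lock;
	int bypass_depth;	/* models q->bypass_depth */
	int inflight;		/* requests not yet completed */
};

/* Wait for all in-flight requests; stands in for __blk_drain_queue(). */
static void drain_requests(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->inflight > 0) {
		pthread_mutex_unlock(&q->lock);
		sched_yield();	/* the kernel sleeps and retries here */
		pthread_mutex_lock(&q->lock);
	}
	pthread_mutex_unlock(&q->lock);
}

/* Old behaviour: only the 0 -> 1 depth transition drains. */
static void bypass_start_old(struct queue *q)
{
	bool drain;

	pthread_mutex_lock(&q->lock);
	drain = !q->bypass_depth++;
	pthread_mutex_unlock(&q->lock);

	/*
	 * BUG: if another thread already bumped bypass_depth but has not
	 * finished drain_requests() yet, we return immediately even
	 * though q->inflight may still be non-zero.
	 */
	if (drain)
		drain_requests(q);
}

/* Fixed behaviour: every bypasser waits for the drain to finish. */
static void bypass_start_new(struct queue *q)
{
	pthread_mutex_lock(&q->lock);
	q->bypass_depth++;
	pthread_mutex_unlock(&q->lock);

	drain_requests(q);
}

In the real patch the unconditional drain is further gated on blk_queue_init_done(q), since queues start out drained and draining during early init would only add boot-time delay, as the diff below shows.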
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  11
1 file changed, 7 insertions, 4 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 6f8dba161bfe..0d0bdd65b2d7 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -438,14 +438,17 @@ static void __blk_drain_queue(struct request_queue *q, bool drain_all)
  */
 void blk_queue_bypass_start(struct request_queue *q)
 {
-	bool drain;
-
 	spin_lock_irq(q->queue_lock);
-	drain = !q->bypass_depth++;
+	q->bypass_depth++;
 	queue_flag_set(QUEUE_FLAG_BYPASS, q);
 	spin_unlock_irq(q->queue_lock);
 
-	if (drain) {
+	/*
+	 * Queues start drained.  Skip actual draining till init is
+	 * complete.  This avoids lengthy delays during queue init which
+	 * can happen many times during boot.
+	 */
+	if (blk_queue_init_done(q)) {
 		spin_lock_irq(q->queue_lock);
 		__blk_drain_queue(q, false);
 		spin_unlock_irq(q->queue_lock);