author     Jens Axboe <axboe@fb.com>  2017-01-26 22:42:34 +0100
committer  Jens Axboe <axboe@fb.com>  2017-01-27 16:20:34 +0100
commit     50e1dab86aa2c10cbca2f754aae9542169403141 (patch)
tree       2e17cd0d604bf63d82e4f8838b57ec579690ca52 /block/blk-mq-sched.c
parent     blk-mq: release driver tag on a requeue event (diff)
blk-mq-sched: fix starvation for multiple hardware queues and shared tags
If we have both multiple hardware queues and a shared tag map between devices, we need to ensure that we propagate the hardware queue restart bit higher up. This is because we can get into a situation where we don't have any IO pending on a hardware queue, yet we fail to get a tag to start new IO. If that happens, it's not enough to mark the hardware queue as needing a restart; we need to bubble that up to the higher level queue as well.

Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Tested-by: Hannes Reinecke <hare@suse.com>
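The diffstat below is limited to block/blk-mq-sched.c, so the side that sets these restart bits is not visible here. A rough sketch of what the marking helper looks like, assuming the same BLK_MQ_S_SCHED_RESTART and QUEUE_FLAG_RESTART flags used by the restart code in this file (the real helper lives in blk-mq-sched.h and may differ in detail):

static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
{
	/* Remember that this hctx failed to get a driver tag. */
	if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
		set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);

		/* With a shared tag map, bubble the restart up to the queue. */
		if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
			struct request_queue *q = hctx->queue;

			if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
				set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
		}
	}
}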
Diffstat (limited to 'block/blk-mq-sched.c')
-rw-r--r--  block/blk-mq-sched.c  28
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index 4cee060a292d..fcc0e893d687 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -301,6 +301,34 @@ bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_bypass_insert);
 
+static void blk_mq_sched_restart_hctx(struct blk_mq_hw_ctx *hctx)
+{
+	if (test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
+		clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+		if (blk_mq_hctx_has_pending(hctx))
+			blk_mq_run_hw_queue(hctx, true);
+	}
+}
+
+void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx)
+{
+	unsigned int i;
+
+	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED))
+		blk_mq_sched_restart_hctx(hctx);
+	else {
+		struct request_queue *q = hctx->queue;
+
+		if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
+			return;
+
+		clear_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
+
+		queue_for_each_hw_ctx(q, hctx, i)
+			blk_mq_sched_restart_hctx(hctx);
+	}
+}
+
 static void blk_mq_sched_free_tags(struct blk_mq_tag_set *set,
 				   struct blk_mq_hw_ctx *hctx,
 				   unsigned int hctx_idx)
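A note on the restart path shown above: blk_mq_sched_restart_queues() is the consumer of these bits. Without shared tags it only restarts the one hctx; with shared tags it clears the queue-level QUEUE_FLAG_RESTART before walking every hctx, so a mark that lands during the walk re-sets the flag and is picked up by the next restart pass rather than being lost. The helper is meant to be called once a driver tag has been released, e.g. on request completion or on the requeue event referenced by the parent commit.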