author     Tejun Heo <tj@kernel.org>   2014-11-04 19:52:27 +0100
committer  Jens Axboe <axboe@fb.com>   2014-11-04 22:49:31 +0100
commit     f3af020b9a8d298022b811a19719df0cf461efa5
tree       7468b9e6077738b941c89433d491662c7b8da8b4 /block/blk-mq.c
parent     block: Fix computation of merged request priority
download   linux-f3af020b9a8d298022b811a19719df0cf461efa5.tar.xz
           linux-f3af020b9a8d298022b811a19719df0cf461efa5.zip
blk-mq: make mq_queue_reinit_notify() freeze queues in parallel
q->mq_usage_counter is a percpu_ref which is killed and drained when
the queue is frozen. On a CPU hotplug event, blk_mq_queue_reinit(),
which involves freezing the queue, is invoked on all existing queues.
Because percpu_ref killing and draining involve an RCU grace period,
doing the above on one queue after another may take a long time if
there are many queues on the system.

This patch splits freezing into an initiation step and a
wait-for-completion step, and updates blk_mq_queue_reinit_notify() so
that the queues are frozen in parallel instead of one after another
(see the user-space sketch after the tags below). Note that freezing
and unfreezing are moved from blk_mq_queue_reinit() to
blk_mq_queue_reinit_notify().

Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
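
To make the ordering concrete, here is a minimal user-space sketch of the start-then-wait pattern referenced above. It is not kernel code: fake_queue, freeze_start, and freeze_wait are invented stand-ins, and a sleeping thread models the RCU grace period that percpu_ref draining waits for.

```c
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NQUEUES 8

struct fake_queue {
	pthread_t grace;	/* stands in for the pending drain */
};

static void *grace_period(void *arg)
{
	sleep(1);		/* one emulated RCU grace period */
	return arg;
}

/* Cheap and non-blocking, like blk_mq_freeze_queue_start(). */
static void freeze_start(struct fake_queue *q)
{
	pthread_create(&q->grace, NULL, grace_period, q);
}

/* Blocks until the drain finishes, like blk_mq_freeze_queue_wait(). */
static void freeze_wait(struct fake_queue *q)
{
	pthread_join(q->grace, NULL);
}

int main(void)
{
	struct fake_queue qs[NQUEUES];
	int i;

	/* Initiate every freeze first... */
	for (i = 0; i < NQUEUES; i++)
		freeze_start(&qs[i]);
	/* ...then wait: the sleeps overlap, so this takes ~1s total
	 * rather than ~NQUEUES seconds for start+wait per queue. */
	for (i = 0; i < NQUEUES; i++)
		freeze_wait(&qs[i]);

	printf("froze %d queues in ~1 grace period\n", NQUEUES);
	return 0;
}
```

Built with `cc -pthread`, the run completes in about one second regardless of NQUEUES; the sequential variant, with start and wait inside a single loop, would take roughly NQUEUES seconds.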
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--  block/blk-mq.c | 41
1 file changed, 33 insertions(+), 8 deletions(-)
```diff
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68929bad9a6a..1d016fc9a8b6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
 	wake_up_all(&q->mq_freeze_wq);
 }
 
-/*
- * Guarantee no request is in use, so we can change any data structure of
- * the queue afterward.
- */
-void blk_mq_freeze_queue(struct request_queue *q)
+static void blk_mq_freeze_queue_start(struct request_queue *q)
 {
 	bool freeze;
 
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
 		percpu_ref_kill(&q->mq_usage_counter);
 		blk_mq_run_queues(q, false);
 	}
+}
+
+static void blk_mq_freeze_queue_wait(struct request_queue *q)
+{
 	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
+/*
+ * Guarantee no request is in use, so we can change any data structure of
+ * the queue afterward.
+ */
+void blk_mq_freeze_queue(struct request_queue *q)
+{
+	blk_mq_freeze_queue_start(q);
+	blk_mq_freeze_queue_wait(q);
+}
+
 static void blk_mq_unfreeze_queue(struct request_queue *q)
 {
 	bool wake;
@@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q)
 /* Basically redo blk_mq_init_queue with queue frozen */
 static void blk_mq_queue_reinit(struct request_queue *q)
 {
-	blk_mq_freeze_queue(q);
+	WARN_ON_ONCE(!q->mq_freeze_depth);
 
 	blk_mq_sysfs_unregister(q);
 
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
 	blk_mq_map_swqueue(q);
 
 	blk_mq_sysfs_register(q);
-
-	blk_mq_unfreeze_queue(q);
 }
 
 static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 		return NOTIFY_OK;
 
 	mutex_lock(&all_q_mutex);
+
+	/*
+	 * We need to freeze and reinit all existing queues. Freezing
+	 * involves synchronous wait for an RCU grace period and doing it
+	 * one by one may take a long time. Start freezing all queues in
+	 * one swoop and then wait for the completions so that freezing can
+	 * take place in parallel.
+	 */
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_start(q);
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_freeze_queue_wait(q);
+
 	list_for_each_entry(q, &all_q_list, all_q_node)
 		blk_mq_queue_reinit(q);
+
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_mq_unfreeze_queue(q);
+
 	mutex_unlock(&all_q_mutex);
 	return NOTIFY_OK;
 }
```
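For readability, here is how the freeze path reads after this patch, pieced together from the hunks above; the body of blk_mq_freeze_queue_start() (the old blk_mq_freeze_queue() minus its final wait_event()) is elided since it is unchanged.

```c
/* Initiation: kill the percpu ref and kick the queues; cheap and
 * non-blocking. Body is the old blk_mq_freeze_queue() without the
 * trailing wait_event(). */
static void blk_mq_freeze_queue_start(struct request_queue *q);

/* Completion: block until all in-flight references drain. */
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
}

/*
 * Guarantee no request is in use, so we can change any data structure of
 * the queue afterward.
 */
void blk_mq_freeze_queue(struct request_queue *q)
{
	blk_mq_freeze_queue_start(q);
	blk_mq_freeze_queue_wait(q);
}

/* In blk_mq_queue_reinit_notify(): separate passes over all_q_list so
 * that every queue's grace period elapses concurrently. */
list_for_each_entry(q, &all_q_list, all_q_node)
	blk_mq_freeze_queue_start(q);	/* initiate all freezes */
list_for_each_entry(q, &all_q_list, all_q_node)
	blk_mq_freeze_queue_wait(q);	/* then wait for all of them */

list_for_each_entry(q, &all_q_list, all_q_node)
	blk_mq_queue_reinit(q);		/* queues are now frozen */

list_for_each_entry(q, &all_q_list, all_q_node)
	blk_mq_unfreeze_queue(q);
```

With the notifier owning the freeze/unfreeze bracket, blk_mq_queue_reinit() itself no longer freezes anything; it only asserts the invariant via WARN_ON_ONCE(!q->mq_freeze_depth).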