author		Bart Van Assche <bart.vanassche@sandisk.com>	2017-04-07 20:16:49 +0200
committer	Jens Axboe <axboe@fb.com>	2017-04-07 20:45:47 +0200
commit		705cda97ee3abb6ea38d651b54f4da83c2bb2a4a
tree		fdc8eae0529160e721890e586080ae4d40163d7a	/block/blk-mq.c
parent		blk-mq: use true instead of 1 for blk_mq_queue_data.last
blk-mq: Make it safe to use RCU to iterate over blk_mq_tag_set.tag_list
Since the next patch in this series will use RCU to iterate over tag_list,
make this safe. Add lockdep_assert_held() statements in functions that
iterate over tag_list to make clear that using list_for_each_entry() instead
of list_for_each_entry_rcu() is fine in these functions.

Signed-off-by: Bart Van Assche <bart.vanassche@sandisk.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
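[Editor's sketch] A minimal illustration of the locking rules this commit establishes, assuming kernel context (<linux/rculist.h>, <linux/lockdep.h>). The reader-side function is hypothetical, standing in for the user added by the next patch in the series; only the writer-side assertion pattern appears in this commit:

/*
 * Hypothetical reader: runs without tag_list_lock, so it must use the
 * RCU list iterator inside an RCU read-side critical section.
 */
static void example_reader(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	rcu_read_lock();
	list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) {
		/* inspect q; must not sleep inside the read-side section */
	}
	rcu_read_unlock();
}

/*
 * Writer-side iteration: serialized by tag_list_lock, so the plain
 * iterator is fine; lockdep_assert_held() documents that requirement.
 */
static void example_writer_iteration(struct blk_mq_tag_set *set)
{
	struct request_queue *q;

	lockdep_assert_held(&set->tag_list_lock);

	list_for_each_entry(q, &set->tag_list, tag_set_list) {
		/* safe: concurrent list mutation is excluded by the mutex */
	}
}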
Diffstat
-rw-r--r--	block/blk-mq.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9bdfeed59d9d..ad057fe572a4 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2111,6 +2111,8 @@ static void blk_mq_update_tag_set_depth(struct blk_mq_tag_set *set, bool shared)
 {
 	struct request_queue *q;
 
+	lockdep_assert_held(&set->tag_list_lock);
+
 	list_for_each_entry(q, &set->tag_list, tag_set_list) {
 		blk_mq_freeze_queue(q);
 		queue_set_hctx_shared(q, shared);
@@ -2123,7 +2125,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 	struct blk_mq_tag_set *set = q->tag_set;
 
 	mutex_lock(&set->tag_list_lock);
-	list_del_init(&q->tag_set_list);
+	list_del_rcu(&q->tag_set_list);
+	INIT_LIST_HEAD(&q->tag_set_list);
 	if (list_is_singular(&set->tag_list)) {
 		/* just transitioned to unshared */
 		set->flags &= ~BLK_MQ_F_TAG_SHARED;
@@ -2131,6 +2134,8 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q)
 		blk_mq_update_tag_set_depth(set, false);
 	}
 	mutex_unlock(&set->tag_list_lock);
+
+	synchronize_rcu();
 }
 
 static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
@@ -2148,7 +2153,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set,
 	}
 	if (set->flags & BLK_MQ_F_TAG_SHARED)
 		queue_set_hctx_shared(q, true);
-	list_add_tail(&q->tag_set_list, &set->tag_list);
+	list_add_tail_rcu(&q->tag_set_list, &set->tag_list);
 	mutex_unlock(&set->tag_list_lock);
 }
@@ -2639,6 +2644,8 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues)
 {
 	struct request_queue *q;
 
+	lockdep_assert_held(&set->tag_list_lock);
+
 	if (nr_hw_queues > nr_cpu_ids)
 		nr_hw_queues = nr_cpu_ids;
 	if (nr_hw_queues < 1 || nr_hw_queues == set->nr_hw_queues)
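
[Editor's sketch] The removal path above follows the standard RCU unlink-then-wait pattern: unlink under the lock with list_del_rcu(), then call synchronize_rcu() after dropping the lock so that any reader still traversing the list finishes before the queue can be torn down. A sketch with an illustrative function name (kernel context assumed; not part of this commit):

static void example_remove(struct blk_mq_tag_set *set, struct request_queue *q)
{
	mutex_lock(&set->tag_list_lock);
	list_del_rcu(&q->tag_set_list);		/* readers may still see q */
	INIT_LIST_HEAD(&q->tag_set_list);	/* leave the node self-consistent */
	mutex_unlock(&set->tag_list_lock);

	/*
	 * Wait for pre-existing RCU read-side critical sections to finish;
	 * after this returns, no list_for_each_entry_rcu() walker can still
	 * be referencing q through tag_list.
	 */
	synchronize_rcu();
}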