author	Dennis Zhou (Facebook) <dennisszhou@gmail.com>	2018-09-11 20:41:27 +0200
committer	Jens Axboe <axboe@kernel.dk>	2018-09-22 04:29:03 +0200
commit	49f4c2dc2b5066e9211101c59cc0828e81d41614 (patch)
tree	217391585a20535d3c2606d98659786bf73682e2 /block
parent	blkcg: fix ref count issue with bio_blkcg using task_css (diff)
blkcg: update blkg_lookup_create to do locking
To know when to create a blkg, the general pattern is to do a blkg_lookup and, if that fails, take the lock and do the lookup again, and if that fails too, finally create. It doesn't make much sense for every call site that wants creation to open-code this itself. This changes blkg_lookup_create to do the locking and implement this pattern. The old blkg_lookup_create is renamed to __blkg_lookup_create. If a call site wants to do its own error handling or already owns the queue lock, it can use __blkg_lookup_create. This will be used in upcoming patches.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Acked-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Liu Bo <bo.liu@linux.alibaba.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
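For reference, this is the lookup-then-lock-then-create pattern the message describes, roughly as blk-iolatency open-coded it before this patch (a simplified sketch; the real call site also copes with a lock it may already hold):

	struct blkcg_gq *blkg;

	/* fast path: lockless lookup under the RCU read lock */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		/* re-lookup and, if still absent, create under the lock */
		blkg = blkg_lookup_create(blkcg, q);	/* old, lock-requiring variant */
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}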
Diffstat (limited to 'block')
-rw-r--r--	block/blk-cgroup.c	31
-rw-r--r--	block/blk-iolatency.c	2
2 files changed, 29 insertions(+), 4 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index c19f9078da1e..cd0d97bed83d 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -259,7 +259,7 @@ err_free_blkg:
 }
 
 /**
- * blkg_lookup_create - lookup blkg, try to create one if not there
+ * __blkg_lookup_create - lookup blkg, try to create one if not there
  * @blkcg: blkcg of interest
  * @q: request_queue of interest
  *
@@ -272,8 +272,8 @@ err_free_blkg:
  * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
  * dead and bypassing, returns ERR_PTR(-EBUSY).
  */
-struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
-				    struct request_queue *q)
+struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
+				      struct request_queue *q)
 {
 	struct blkcg_gq *blkg;
 
@@ -310,6 +310,31 @@ struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 	}
 }
 
+/**
+ * blkg_lookup_create - find or create a blkg
+ * @blkcg: target block cgroup
+ * @q: target request_queue
+ *
+ * This looks up or creates the blkg representing the unique pair
+ * of the blkcg and the request_queue.
+ */
+struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
+				    struct request_queue *q)
+{
+	struct blkcg_gq *blkg = blkg_lookup(blkcg, q);
+	unsigned long flags;
+
+	if (unlikely(!blkg)) {
+		spin_lock_irqsave(q->queue_lock, flags);
+
+		blkg = __blkg_lookup_create(blkcg, q);
+
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+
+	return blkg;
+}
+
 static void blkg_destroy(struct blkcg_gq *blkg)
 {
 	struct blkcg *blkcg = blkg->blkcg;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 82450c37f2aa..ffde3ab9f84c 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -407,7 +407,7 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
 	if (unlikely(!blkg)) {
 		if (!lock)
 			spin_lock_irq(q->queue_lock);
-		blkg = blkg_lookup_create(blkcg, q);
+		blkg = __blkg_lookup_create(blkcg, q);
 		if (IS_ERR(blkg))
 			blkg = NULL;
 		if (!lock)
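
After this patch, a caller that does not already hold the queue lock can rely on the new wrapper instead of open-coding the locking. A minimal usage sketch (the surrounding context is illustrative, not from the patch):

	struct blkcg_gq *blkg;

	rcu_read_lock();	/* blkcg lookups are RCU-protected */
	blkg = blkg_lookup_create(blkcg, q);	/* takes q->queue_lock itself if it must create */
	if (IS_ERR(blkg))
		blkg = NULL;
	/* ... use blkg while still under the RCU read lock ... */
	rcu_read_unlock();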