author    Dennis Zhou (Facebook) <dennisszhou@gmail.com>  2018-09-11 20:41:29 +0200
committer Jens Axboe <axboe@kernel.dk>                    2018-09-22 04:29:06 +0200
commit    a7b39b4e961c4e2b3ed837803a7441a65c90ce33
tree      7dda6dda4a341971504ccd16a44dbda27d57a0f8  /block
parent    blkcg: convert blkg_lookup_create to find closest blkg
blkcg: always associate a bio with a blkg
Previously, blkgs were only assigned as needed by blk-iolatency and blk-throttle. bio->css was also always being associated while the blkg was being looked up and then thrown away in blkcg_bio_issue_check.

This patch begins the cleanup of bio->css and bio->bi_blkg by always associating a blkg in blkcg_bio_issue_check. It tries to create the blkg, but if that is not possible, it falls back to using the root_blkg of the request_queue. Therefore, a bio will always be associated with a blkg. The duplicate association logic is removed from blk-throttle and blk-iolatency.

Signed-off-by: Dennis Zhou <dennisszhou@gmail.com>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
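The association itself happens in blkcg_bio_issue_check(), which lives in include/linux/blk-cgroup.h and is therefore outside the 'block' diffstat shown below. A minimal sketch of the caller side after this patch, assuming only what the message and the diff establish (the bio_associate_create_blkg() and blk_throtl_bio() signatures); stat accounting and the rq_qos hooks are omitted:

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	bool throtl;

	rcu_read_lock();

	/*
	 * Always associate a blkg. On failure, bio_associate_create_blkg()
	 * falls back to q->root_blkg, so bio->bi_blkg is valid from here on.
	 */
	bio_associate_create_blkg(q, bio);

	throtl = blk_throtl_bio(q, bio->bi_blkg, bio);

	rcu_read_unlock();
	return !throtl;
}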
Diffstat (limited to 'block')
-rw-r--r--  block/bio.c            38
-rw-r--r--  block/blk-iolatency.c  24
-rw-r--r--  block/blk-throttle.c    5

3 files changed, 41 insertions(+), 26 deletions(-)
diff --git a/block/bio.c b/block/bio.c
index bfd41e8b53a8..748d7132f172 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -2029,6 +2029,41 @@ int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg)
 }
 
 /**
+ * bio_associate_create_blkg - associate a bio with a blkg from q
+ * @q: request_queue where bio is going
+ * @bio: target bio
+ *
+ * Associate @bio with the blkg found from the bio's css and the request_queue.
+ * If one is not found, bio_lookup_blkg creates the blkg.
+ */
+int bio_associate_create_blkg(struct request_queue *q, struct bio *bio)
+{
+	struct blkcg *blkcg;
+	struct blkcg_gq *blkg;
+	int ret = 0;
+
+	/* someone has already associated this bio with a blkg */
+	if (bio->bi_blkg)
+		return ret;
+
+	rcu_read_lock();
+
+	bio_associate_blkcg(bio, NULL);
+	blkcg = bio_blkcg(bio);
+
+	if (!blkcg->css.parent) {
+		ret = bio_associate_blkg(bio, q->root_blkg);
+	} else {
+		blkg = blkg_lookup_create(blkcg, q);
+
+		ret = bio_associate_blkg(bio, blkg);
+	}
+
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
  * bio_disassociate_task - undo bio_associate_current()
  * @bio: target bio
  */
@@ -2057,6 +2092,9 @@ void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
 {
 	if (src->bi_css)
 		WARN_ON(bio_associate_blkcg(dst, src->bi_css));
+
+	if (src->bi_blkg)
+		bio_associate_blkg(dst, src->bi_blkg);
 }
 EXPORT_SYMBOL_GPL(bio_clone_blkcg_association);
 #endif /* CONFIG_BLK_CGROUP */
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index ffde3ab9f84c..7337fbc7f850 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -392,34 +392,14 @@ static void blkcg_iolatency_throttle(struct rq_qos *rqos, struct bio *bio,
 				     spinlock_t *lock)
 {
 	struct blk_iolatency *blkiolat = BLKIOLATENCY(rqos);
-	struct blkcg *blkcg;
-	struct blkcg_gq *blkg;
-	struct request_queue *q = rqos->q;
+	struct blkcg_gq *blkg = bio->bi_blkg;
 	bool issue_as_root = bio_issue_as_root_blkg(bio);
 
 	if (!blk_iolatency_enabled(blkiolat))
 		return;
 
-	rcu_read_lock();
-	bio_associate_blkcg(bio, NULL);
-	blkcg = bio_blkcg(bio);
-	blkg = blkg_lookup(blkcg, q);
-	if (unlikely(!blkg)) {
-		if (!lock)
-			spin_lock_irq(q->queue_lock);
-		blkg = __blkg_lookup_create(blkcg, q);
-		if (IS_ERR(blkg))
-			blkg = NULL;
-		if (!lock)
-			spin_unlock_irq(q->queue_lock);
-	}
-	if (!blkg)
-		goto out;
-
 	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
-	bio_associate_blkg(bio, blkg);
-out:
-	rcu_read_unlock();
+
 	while (blkg && blkg->parent) {
 		struct iolatency_grp *iolat = blkg_to_lat(blkg);
 		if (!iolat) {
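With the association centralized, blkcg_iolatency_throttle() simply reads bio->bi_blkg and walks up the hierarchy via blkg->parent; the removed block was the per-policy duplicate of the lookup/create dance (rcu_read_lock plus a conditional queue_lock acquisition) that bio_associate_create_blkg() now performs once per bio.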
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index db1a3a2ae006..e62ae502891b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -2118,9 +2118,6 @@ static inline void throtl_update_latency_buckets(struct throtl_data *td)
 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
 {
 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
-	/* fallback to root_blkg if we fail to get a blkg ref */
-	if (bio->bi_css && (bio_associate_blkg(bio, tg_to_blkg(tg)) == -ENODEV))
-		bio_associate_blkg(bio, bio->bi_disk->queue->root_blkg);
 	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
 #endif
 }
@@ -2129,7 +2126,7 @@ bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
 		    struct bio *bio)
 {
 	struct throtl_qnode *qn = NULL;
-	struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
+	struct throtl_grp *tg = blkg_to_tg(blkg);
 	struct throtl_service_queue *sq;
 	bool rw = bio_data_dir(bio);
 	bool throttled = false;
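The `blkg ?: q->root_blkg` fallback is dropped because blk_throtl_bio() can now rely on its caller having associated a blkg with the bio; the root_blkg fallback already happened at association time, so the argument is never NULL.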