From 216f764716f34fe68cedc7296ae2043a7727e640 Mon Sep 17 00:00:00 2001
From: Yu Kuai
Date: Tue, 3 Jan 2023 16:47:55 +0800
Subject: block, bfq: switch 'bfqg->ref' to use atomic refcount apis

The updating of 'bfqg->ref' should be protected by 'bfqd->lock'; however,
during code review, we found that bfq_pd_free() updates 'bfqg->ref'
without holding the lock, which is problematic:

1) bfq_pd_free() triggered by removing cgroup is called asynchronously;
2) bfqq will grab a bfqg reference, and exiting bfqq will drop the
   reference, which can run concurrently with 1).

Unfortunately, 'bfqd->lock' can't be held here because 'bfqd' might
already be freed in bfq_pd_free().

Fix the problem by using atomic refcount apis.

Signed-off-by: Yu Kuai
Reviewed-by: Jan Kara
Link: https://lore.kernel.org/r/20230103084755.1256479-1-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe
---
 block/bfq-cgroup.c  | 8 +++-----
 block/bfq-iosched.h | 2 +-
 2 files changed, 4 insertions(+), 6 deletions(-)

(limited to 'block')

diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 1b2829e99dad..7d9b15f0dbd5 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -316,14 +316,12 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
 
 static void bfqg_get(struct bfq_group *bfqg)
 {
-	bfqg->ref++;
+	refcount_inc(&bfqg->ref);
 }
 
 static void bfqg_put(struct bfq_group *bfqg)
 {
-	bfqg->ref--;
-
-	if (bfqg->ref == 0)
+	if (refcount_dec_and_test(&bfqg->ref))
 		kfree(bfqg);
 }
 
@@ -530,7 +528,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
 	}
 
 	/* see comments in bfq_bic_update_cgroup for why refcounting */
-	bfqg_get(bfqg);
+	refcount_set(&bfqg->ref, 1);
 	return &bfqg->pd;
 }
 
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 41aa151ccc22..466e4865ace6 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -928,7 +928,7 @@ struct bfq_group {
 	char blkg_path[128];
 
 	/* reference counter (see comments in bfq_bic_update_cgroup) */
-	int ref;
+	refcount_t ref;
 
 	/* Is bfq_group still online? */
 	bool online;
--
cgit v1.2.3
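A note on why the atomic API closes the race described above: with a plain
integer, the decrement and the zero test in bfqg_put() are separate steps, so
two concurrent putters can interleave and either both miss zero (a leak) or
both observe zero (a double free). The standalone sketch below models the
refcount_inc()/refcount_dec_and_test() pattern with C11 atomics. It is a
simplified illustration, not the kernel implementation (lib/refcount.c
additionally handles saturation and underflow), and the model_* names are
invented here.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Minimal model of refcount_t: keeps only the atomicity the fix relies on. */
typedef struct { atomic_int refs; } refcount_model_t;

static void model_refcount_set(refcount_model_t *r, int n)
{
	atomic_store(&r->refs, n);
}

static void model_refcount_inc(refcount_model_t *r)
{
	atomic_fetch_add(&r->refs, 1);
}

/*
 * Returns true for exactly one caller: the decrement and the zero test
 * are a single atomic step, so concurrent callers cannot both observe
 * the final reference going away.
 */
static bool model_refcount_dec_and_test(refcount_model_t *r)
{
	return atomic_fetch_sub(&r->refs, 1) == 1;
}

struct obj { refcount_model_t ref; };

static void obj_put(struct obj *o)
{
	/*
	 * With the non-atomic 'o->ref--; if (o->ref == 0)' version, two
	 * threads could interleave here and free twice, or never.
	 */
	if (model_refcount_dec_and_test(&o->ref))
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	model_refcount_set(&o->ref, 1); /* like refcount_set() in bfq_pd_alloc() */
	model_refcount_inc(&o->ref);    /* a second user takes a reference */
	obj_put(o);                     /* drops to 1, no free */
	obj_put(o);                     /* drops to 0, frees */
	return 0;
}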
From e3ff8887e7db757360f97634e0d6f4b8e27a8c46 Mon Sep 17 00:00:00 2001
From: Yu Kuai
Date: Tue, 3 Jan 2023 19:28:33 +0800
Subject: blk-cgroup: fix missing pd_online_fn() while activating policy

If the policy defines pd_online_fn(), it should be called after
pd_init_fn(), as blkg_create() does.

Signed-off-by: Yu Kuai
Acked-by: Tejun Heo
Link: https://lore.kernel.org/r/20230103112833.2013432-1-yukuai1@huaweicloud.com
Signed-off-by: Jens Axboe
---
 block/blk-cgroup.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'block')

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ce6a2b7d3dfb..4c94a6560f62 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1455,6 +1455,10 @@ retry:
 	list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
 		pol->pd_init_fn(blkg->pd[pol->plid]);
 
+	if (pol->pd_online_fn)
+		list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+			pol->pd_online_fn(blkg->pd[pol->plid]);
+
 	__set_bit(pol->plid, q->blkcg_pols);
 	ret = 0;
 
--
cgit v1.2.3

From 7746564793978fe2f43b18a302b22dca0ad3a0e8 Mon Sep 17 00:00:00 2001
From: Pavel Begunkov
Date: Tue, 17 Jan 2023 11:42:15 +0000
Subject: block: fix hctx checks for batch allocation

When there are no read queues, read requests will be assigned a
default queue on allocation. However, blk_mq_get_cached_request() is
not prepared for that and will fail all attempts to grab read requests
from the cache. In the worst case it doubles the number of requests
allocated, roughly half of which will be returned by
blk_mq_free_plug_rqs().

It only affects batched allocations and so is io_uring specific.
For reference, the QD8 t/io_uring benchmark improves by 20-35%.

Signed-off-by: Pavel Begunkov
Link: https://lore.kernel.org/r/80d4511011d7d4751b4cf6375c4e38f237d935e3.1673955390.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe
---
 block/blk-mq.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'block')

diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2c49b4151da1..9d463f7563bc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2890,6 +2890,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
 {
 	struct request *rq;
+	enum hctx_type type, hctx_type;
 
 	if (!plug)
 		return NULL;
@@ -2902,7 +2903,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
 		return NULL;
 	}
 
-	if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+	type = blk_mq_get_hctx_type((*bio)->bi_opf);
+	hctx_type = rq->mq_hctx->type;
+	if (type != hctx_type &&
+	    !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
 		return NULL;
 	if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
 		return NULL;
--
cgit v1.2.3
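The mismatch fixed by this last commit is easier to see in a standalone
model. The sketch below is illustrative only: bio_hctx_type() and
rq_hctx_type() are invented names standing in for blk_mq_get_hctx_type()
and for the queue-map lookup that, without dedicated read queues, places
reads on the default hctx.

#include <stdbool.h>
#include <stdio.h>

enum hctx_type { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL };

/* What the type lookup reports for a bio: reads always claim READ. */
static enum hctx_type bio_hctx_type(bool is_read)
{
	return is_read ? HCTX_TYPE_READ : HCTX_TYPE_DEFAULT;
}

/*
 * The hctx a request is actually allocated on: with no dedicated read
 * queues, the READ map aliases the DEFAULT one, as the commit message
 * describes.
 */
static enum hctx_type rq_hctx_type(bool is_read, bool have_read_queues)
{
	return (is_read && have_read_queues) ? HCTX_TYPE_READ
					     : HCTX_TYPE_DEFAULT;
}

/*
 * The check as patched: a READ bio may reuse a cached request sitting
 * on a DEFAULT hctx, since that is where reads land anyway.
 */
static bool cached_rq_usable(enum hctx_type type, enum hctx_type hctx_type)
{
	return type == hctx_type ||
	       (type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT);
}

int main(void)
{
	enum hctx_type want = bio_hctx_type(true);       /* READ */
	enum hctx_type got = rq_hctx_type(true, false);  /* DEFAULT */

	/*
	 * The old strict equality rejected this pair every time,
	 * defeating the plug cache; the relaxed check accepts it.
	 */
	printf("old check: %d, fixed check: %d\n",
	       want == got, cached_rq_usable(want, got));
	return 0;
}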