author     Linus Torvalds <torvalds@linux-foundation.org>  2023-01-20 21:44:41 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-01-20 21:44:41 +0100
commit     edc00350d205d2de8871b514c8f9b403d588e5d1 (patch)
tree       861929c5276365774ee4ecad7ae223c0687dd04d /block
parent     Merge tag 'io_uring-6.2-2023-01-20' of git://git.kernel.dk/linux (diff)
parent     Merge tag 'nvme-6.2-2023-01-20' of git://git.infradead.org/nvme into block-6.2 (diff)
download   linux-edc00350d205d2de8871b514c8f9b403d588e5d1.tar.xz
           linux-edc00350d205d2de8871b514c8f9b403d588e5d1.zip
Merge tag 'block-6.2-2023-01-20' of git://git.kernel.dk/linux
Pull block fixes from Jens Axboe:
 "Various little tweaks all over the place:

  - NVMe pull request via Christoph:
      - fix controller shutdown regression in nvme-apple (Janne Grunau)
      - fix a polling on timeout regression in nvme-pci (Keith Busch)

  - Fix a bug in the read request side request allocation caching (Pavel)

  - pktcdvd was brought back after we configured a NULL return on bio
    splits, make it consistent with the others (me)

  - BFQ refcount fix (Yu)

  - Block cgroup policy activation fix (Yu)

  - Fix for an md regression introduced in the 6.2 cycle (Adrian)"

* tag 'block-6.2-2023-01-20' of git://git.kernel.dk/linux:
  nvme-pci: fix timeout request state check
  nvme-apple: only reset the controller when RTKit is running
  nvme-apple: reset controller during shutdown
  block: fix hctx checks for batch allocation
  block/rnbd-clt: fix wrong max ID in ida_alloc_max
  blk-cgroup: fix missing pd_online_fn() while activating policy
  pktcdvd: check for NULL return after calling bio_split_to_limits()
  block, bfq: switch 'bfqg->ref' to use atomic refcount apis
  md: fix incorrect declaration about claim_rdev in md_import_device
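The rnbd-clt entry above is about the inclusive upper bound of ida_alloc_max(): the max argument is the largest ID that may be returned, so a driver that wants indices strictly below some limit must pass limit - 1. A minimal sketch of that pattern, assuming an illustrative MAX_DEVS constant rather than the actual rnbd-clt value:

#include <linux/idr.h>
#include <linux/gfp.h>

#define MAX_DEVS 256			/* illustrative limit, not the rnbd-clt constant */

static DEFINE_IDA(dev_index_ida);

static int alloc_dev_index(void)
{
	/*
	 * ida_alloc_max() hands out an ID in the inclusive range [0, max],
	 * so the bound must be MAX_DEVS - 1 to keep indices below MAX_DEVS.
	 */
	return ida_alloc_max(&dev_index_ida, MAX_DEVS - 1, GFP_KERNEL);
}

static void free_dev_index(int idx)
{
	ida_free(&dev_index_ida, idx);
}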
Diffstat (limited to 'block')
-rw-r--r--  block/bfq-cgroup.c   8
-rw-r--r--  block/bfq-iosched.h  2
-rw-r--r--  block/blk-cgroup.c   4
-rw-r--r--  block/blk-mq.c       6
4 files changed, 13 insertions(+), 7 deletions(-)
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 1b2829e99dad..7d9b15f0dbd5 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -316,14 +316,12 @@ struct bfq_group *bfqq_group(struct bfq_queue *bfqq)
static void bfqg_get(struct bfq_group *bfqg)
{
- bfqg->ref++;
+ refcount_inc(&bfqg->ref);
}
static void bfqg_put(struct bfq_group *bfqg)
{
- bfqg->ref--;
-
- if (bfqg->ref == 0)
+ if (refcount_dec_and_test(&bfqg->ref))
kfree(bfqg);
}
@@ -530,7 +528,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, struct request_queue *q,
}
/* see comments in bfq_bic_update_cgroup for why refcounting */
- bfqg_get(bfqg);
+ refcount_set(&bfqg->ref, 1);
return &bfqg->pd;
}
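The hunks above replace bfq_group's open-coded integer reference count with the kernel's refcount_t API, which warns and saturates on overflow/underflow instead of silently wrapping. Below is a minimal sketch of the same get/put pattern; struct demo_group and the demo_* helpers are illustrative stand-ins, not code from the bfq tree.

#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_group {			/* illustrative stand-in for bfq_group */
	refcount_t ref;
	/* ... payload ... */
};

static struct demo_group *demo_alloc(gfp_t gfp)
{
	struct demo_group *g = kzalloc(sizeof(*g), gfp);

	if (g)
		refcount_set(&g->ref, 1);	/* initial reference, as bfq_pd_alloc() now does */
	return g;
}

static void demo_get(struct demo_group *g)
{
	refcount_inc(&g->ref);
}

static void demo_put(struct demo_group *g)
{
	/* free only when the last reference is dropped */
	if (refcount_dec_and_test(&g->ref))
		kfree(g);
}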
diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
index 41aa151ccc22..466e4865ace6 100644
--- a/block/bfq-iosched.h
+++ b/block/bfq-iosched.h
@@ -928,7 +928,7 @@ struct bfq_group {
char blkg_path[128];
/* reference counter (see comments in bfq_bic_update_cgroup) */
- int ref;
+ refcount_t ref;
/* Is bfq_group still online? */
bool online;
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index ce6a2b7d3dfb..4c94a6560f62 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1455,6 +1455,10 @@ retry:
list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
pol->pd_init_fn(blkg->pd[pol->plid]);
+ if (pol->pd_online_fn)
+ list_for_each_entry_reverse(blkg, &q->blkg_list, q_node)
+ pol->pd_online_fn(blkg->pd[pol->plid]);
+
__set_bit(pol->plid, q->blkcg_pols);
ret = 0;
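With the change above, activating a policy on a queue now runs the policy's pd_online_fn() for every blkg that already exists, mirroring what blkg_create() does for policies that are already active. A rough sketch of the hooks a policy registers, following the 6.2-era signatures visible in the bfq hunk above; the demo_* names and struct demo_pd are made up for illustration:

/* sketch only; "blk-cgroup.h" is the kernel-internal header under block/ */
#include "blk-cgroup.h"
#include <linux/slab.h>

struct demo_pd {
	struct blkg_policy_data pd;
	bool online;
};

static struct blkg_policy_data *demo_pd_alloc(gfp_t gfp,
					      struct request_queue *q,
					      struct blkcg *blkcg)
{
	struct demo_pd *dpd = kzalloc_node(sizeof(*dpd), gfp, q->node);

	return dpd ? &dpd->pd : NULL;
}

static void demo_pd_init(struct blkg_policy_data *pd)
{
	/* called for each pre-existing blkg during activation */
}

static void demo_pd_online(struct blkg_policy_data *pd)
{
	/* with the fix above, also called during activation, after pd_init_fn */
	container_of(pd, struct demo_pd, pd)->online = true;
}

static void demo_pd_free(struct blkg_policy_data *pd)
{
	kfree(container_of(pd, struct demo_pd, pd));
}

static struct blkcg_policy demo_blkcg_policy = {
	.pd_alloc_fn	= demo_pd_alloc,
	.pd_init_fn	= demo_pd_init,
	.pd_online_fn	= demo_pd_online,
	.pd_free_fn	= demo_pd_free,
};

A policy registered with blkcg_policy_register() and later enabled via blkcg_activate_policy() should now see its pd_online_fn() invoked for those pre-existing blkgs as well.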
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 2c49b4151da1..9d463f7563bc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2890,6 +2890,7 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
{
struct request *rq;
+ enum hctx_type type, hctx_type;
if (!plug)
return NULL;
@@ -2902,7 +2903,10 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
return NULL;
}
- if (blk_mq_get_hctx_type((*bio)->bi_opf) != rq->mq_hctx->type)
+ type = blk_mq_get_hctx_type((*bio)->bi_opf);
+ hctx_type = rq->mq_hctx->type;
+ if (type != hctx_type &&
+ !(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
return NULL;
if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
return NULL;
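The blk-mq hunk relaxes the cached-request check: a plugged request cached on a HCTX_TYPE_DEFAULT hardware queue can still serve a read bio that maps to HCTX_TYPE_READ, because queues without a dedicated read map fall back to the default type. A small illustrative helper restating that rule (the function name is not from the kernel tree):

#include <linux/blk-mq.h>

/*
 * The cached request is reusable when the hctx types match, or when the
 * bio wants the READ type while the request was cached on the DEFAULT
 * type, i.e. the fallback used when no dedicated read map exists.
 */
static bool demo_cached_rq_type_ok(enum hctx_type bio_type,
				   enum hctx_type rq_type)
{
	if (bio_type == rq_type)
		return true;
	return bio_type == HCTX_TYPE_READ && rq_type == HCTX_TYPE_DEFAULT;
}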