author | Christoph Hellwig <hch@lst.de> | 2021-09-29 09:12:39 +0200 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2021-10-16 05:02:47 +0200 |
commit | a6741536f44ae65491cfde6870ae518aabe1ca36 (patch) | |
tree | 5cf8803796ba5519add7a1ce416c415dedc16cad /block | |
parent | block: factor out a blk_try_enter_queue helper (diff) | |
download | linux-a6741536f44ae65491cfde6870ae518aabe1ca36.tar.xz linux-a6741536f44ae65491cfde6870ae518aabe1ca36.zip |
block: split bio_queue_enter from blk_queue_enter
To prepare for fixing a gendisk shutdown race, open code the
blk_queue_enter logic in bio_queue_enter. This also removes the
pointless flags translation.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Darrick J. Wong <djwong@kernel.org>
Link: https://lore.kernel.org/r/20210929071241.934472-4-hch@lst.de
Tested-by: Yi Zhang <yi.zhang@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-core.c | 33 |
1 file changed, 25 insertions, 8 deletions
```diff
diff --git a/block/blk-core.c b/block/blk-core.c
index 7e9eadacf2de..43f5da707d8e 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -475,18 +475,35 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 static inline int bio_queue_enter(struct bio *bio)
 {
 	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
-	bool nowait = bio->bi_opf & REQ_NOWAIT;
-	int ret;
 
-	ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0);
-	if (unlikely(ret)) {
-		if (nowait && !blk_queue_dying(q))
+	while (!blk_try_enter_queue(q, false)) {
+		if (bio->bi_opf & REQ_NOWAIT) {
+			if (blk_queue_dying(q))
+				goto dead;
 			bio_wouldblock_error(bio);
-		else
-			bio_io_error(bio);
+			return -EBUSY;
+		}
+
+		/*
+		 * read pair of barrier in blk_freeze_queue_start(), we need to
+		 * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
+		 * reading .mq_freeze_depth or queue dying flag, otherwise the
+		 * following wait may never return if the two reads are
+		 * reordered.
+		 */
+		smp_rmb();
+		wait_event(q->mq_freeze_wq,
+			   (!q->mq_freeze_depth &&
+			    blk_pm_resume_queue(false, q)) ||
+			   blk_queue_dying(q));
+		if (blk_queue_dying(q))
+			goto dead;
 	}
 
-	return ret;
+	return 0;
+dead:
+	bio_io_error(bio);
+	return -ENODEV;
 }
 
 void blk_queue_exit(struct request_queue *q)
```
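For readers who want to poke at the control flow outside the kernel, below is a minimal userspace sketch of the loop this patch open codes: a fast-path try-enter, an immediate failure for REQ_NOWAIT submitters, and a blocking wait that also gives up once the queue is marked dying. This is an illustration only, not kernel code: the fake_* names are hypothetical, and a pthread mutex/condvar stands in for the percpu reference, smp_rmb() and q->mq_freeze_wq used by the real bio_queue_enter().

```c
/*
 * Userspace sketch (an assumption, not kernel code) of the open-coded
 * enter loop above. Build with: cc -pthread sketch.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	pthread_mutex_t lock;
	pthread_cond_t freeze_wq;	/* stands in for q->mq_freeze_wq */
	int freeze_depth;		/* stands in for q->mq_freeze_depth */
	bool dying;			/* stands in for blk_queue_dying(q) */
};

/* Fast path: enter only while the queue is neither frozen nor dying. */
static bool fake_try_enter(struct fake_queue *q)
{
	pthread_mutex_lock(&q->lock);
	bool ok = !q->freeze_depth && !q->dying;
	pthread_mutex_unlock(&q->lock);
	return ok;
}

/* Mirrors the shape of bio_queue_enter() after this patch. */
static int fake_queue_enter(struct fake_queue *q, bool nowait)
{
	while (!fake_try_enter(q)) {
		bool dying;

		pthread_mutex_lock(&q->lock);
		if (nowait) {
			/* REQ_NOWAIT: fail fast instead of sleeping. */
			dying = q->dying;
			pthread_mutex_unlock(&q->lock);
			return dying ? -ENODEV : -EBUSY;
		}
		/*
		 * Re-checking the state under the lock before sleeping plays
		 * the role of the smp_rmb()/wait_event() pairing in the patch.
		 */
		while (q->freeze_depth && !q->dying)
			pthread_cond_wait(&q->freeze_wq, &q->lock);
		dying = q->dying;
		pthread_mutex_unlock(&q->lock);

		if (dying)
			return -ENODEV;	/* the "dead" label in the patch */
	}
	return 0;
}

int main(void)
{
	/* Single-threaded demo, so the state is flipped directly. */
	static struct fake_queue q = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.freeze_wq = PTHREAD_COND_INITIALIZER,
	};

	printf("idle queue:         %d\n", fake_queue_enter(&q, false)); /* 0 */
	q.freeze_depth = 1;
	printf("frozen, REQ_NOWAIT: %d\n", fake_queue_enter(&q, true));  /* -EBUSY */
	q.dying = true;
	printf("dying queue:        %d\n", fake_queue_enter(&q, false)); /* -ENODEV */
	return 0;
}
```

The one design point the sketch tries to preserve is that the queue state is re-checked after the fast path fails and before sleeping, which is what the smp_rmb()/wait_event() ordering in the patch guarantees against a concurrent blk_freeze_queue_start().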