| author | Linus Torvalds <torvalds@linux-foundation.org> | 2021-11-09 20:20:07 +0100 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2021-11-09 20:20:07 +0100 |
| commit | 3e28850cbd359bed841b832200f9fc208a9ef040 (patch) | |
| tree | ef4e5b294f934f58fc08feb89d24291b71c01d4a /drivers | |
| parent | Merge tag 'for-5.16/bdev-size-2021-11-09' of git://git.kernel.dk/linux-block (diff) | |
| parent | nvme: wait until quiesce is done (diff) | |
Merge tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
- Set of fixes for the batched tag allocation (Ming, me)
- add_disk() error handling fix (Luis)
- Nested queue quiesce fixes (Ming; see the sketch after the shortlog below)
- Shared tags init error handling fix (Ye)
- Misc cleanups (Jean, Ming, me)
* tag 'for-5.16/block-2021-11-09' of git://git.kernel.dk/linux-block:
nvme: wait until quiesce is done
scsi: make sure that request queue quiesce and unquiesce are balanced
scsi: avoid quiescing sdev->request_queue twice
blk-mq: add one API for waiting until quiesce is done
blk-mq: don't free tags if the tag_set is used by other device in queue initialization
block: fix device_add_disk() kobject_create_and_add() error handling
block: ensure cached plug request matches the current queue
block: move queue enter logic into blk_mq_submit_bio()
block: make bio_queue_enter() fast-path available inline
block: split request allocation components into helpers
block: have plug stored requests hold references to the queue
blk-mq: update hctx->nr_active in blk_mq_end_request_batch()
blk-mq: add RQF_ELV debug entry
blk-mq: only try to run plug merge if the request has the same queue as the incoming bio
block: move RQF_ELV setting into allocators
dm: don't stop request queue after the dm device is suspended
block: replace always false argument with 'false'
block: assign correct tag before doing prefetch of request
blk-mq: fix redundant check of !e expression
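
The nested-quiesce fixes in this pull center on one new block-layer primitive, blk_mq_wait_quiesce_done(): the first caller to stop a queue performs the quiesce, and any nested caller only waits for that quiesce to complete. Below is a minimal sketch of the idiom as the NVMe hunks in the diff apply it; struct my_dev, MY_DEV_STOPPED, and my_stop_queue() are illustrative names, not kernel API.

```c
#include <linux/bitops.h>
#include <linux/blk-mq.h>

#define MY_DEV_STOPPED	0	/* bit number in my_dev.flags; hypothetical */

/* Hypothetical driver state; mirrors nvme_stop_ns_queue() in the diff. */
struct my_dev {
	unsigned long flags;
	struct request_queue *queue;
};

static void my_stop_queue(struct my_dev *dev)
{
	/* Only the first stop actually quiesces the queue... */
	if (!test_and_set_bit(MY_DEV_STOPPED, &dev->flags))
		blk_mq_quiesce_queue(dev->queue);
	else
		/* ...nested stops just wait until that quiesce is done. */
		blk_mq_wait_quiesce_done(dev->queue);
}
```

On return from either branch, no dispatch can be in flight on the queue, which is the guarantee the old code failed to give nested callers: the second caller used to return immediately even though the quiesce might still be in progress.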
Diffstat (limited to 'drivers')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/md/dm.c | 10 |
| -rw-r--r-- | drivers/nvme/host/core.c | 4 |
| -rw-r--r-- | drivers/scsi/scsi_lib.c | 62 |

3 files changed, 44 insertions(+), 32 deletions(-)
```diff
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 8d3157241262..662742a310cb 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1927,16 +1927,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
 
 	dm_table_event_callback(t, event_callback, md);
 
-	/*
-	 * The queue hasn't been stopped yet, if the old table type wasn't
-	 * for request-based during suspension. So stop it to prevent
-	 * I/O mapping before resume.
-	 * This must be done before setting the queue restrictions,
-	 * because request-based dm may be run just after the setting.
-	 */
-	if (request_based)
-		dm_stop_queue(q);
-
 	if (request_based) {
 		/*
 		 * Leverage the fact that request-based DM targets are
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 838b5e2058be..4b5de8f5435a 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4518,6 +4518,8 @@ static void nvme_stop_ns_queue(struct nvme_ns *ns)
 {
 	if (!test_and_set_bit(NVME_NS_STOPPED, &ns->flags))
 		blk_mq_quiesce_queue(ns->queue);
+	else
+		blk_mq_wait_quiesce_done(ns->queue);
 }
 
 /*
@@ -4637,6 +4639,8 @@ void nvme_stop_admin_queue(struct nvme_ctrl *ctrl)
 {
 	if (!test_and_set_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->flags))
 		blk_mq_quiesce_queue(ctrl->admin_q);
+	else
+		blk_mq_wait_quiesce_done(ctrl->admin_q);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_admin_queue);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1344553afe70..b731c2983515 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2665,6 +2665,40 @@ scsi_target_resume(struct scsi_target *starget)
 }
 EXPORT_SYMBOL(scsi_target_resume);
 
+static int __scsi_internal_device_block_nowait(struct scsi_device *sdev)
+{
+	if (scsi_device_set_state(sdev, SDEV_BLOCK))
+		return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
+
+	return 0;
+}
+
+void scsi_start_queue(struct scsi_device *sdev)
+{
+	if (cmpxchg(&sdev->queue_stopped, 1, 0))
+		blk_mq_unquiesce_queue(sdev->request_queue);
+}
+
+static void scsi_stop_queue(struct scsi_device *sdev, bool nowait)
+{
+	/*
+	 * The atomic variable of ->queue_stopped covers that
+	 * blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue.
+	 *
+	 * However, we still need to wait until quiesce is done
+	 * in case that queue has been stopped.
+	 */
+	if (!cmpxchg(&sdev->queue_stopped, 0, 1)) {
+		if (nowait)
+			blk_mq_quiesce_queue_nowait(sdev->request_queue);
+		else
+			blk_mq_quiesce_queue(sdev->request_queue);
+	} else {
+		if (!nowait)
+			blk_mq_wait_quiesce_done(sdev->request_queue);
+	}
+}
+
 /**
  * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state
  * @sdev:	device to block
@@ -2681,24 +2715,16 @@ EXPORT_SYMBOL(scsi_target_resume);
  */
 int scsi_internal_device_block_nowait(struct scsi_device *sdev)
 {
-	struct request_queue *q = sdev->request_queue;
-	int err = 0;
-
-	err = scsi_device_set_state(sdev, SDEV_BLOCK);
-	if (err) {
-		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);
-
-		if (err)
-			return err;
-	}
+	int ret = __scsi_internal_device_block_nowait(sdev);
 
 	/*
 	 * The device has transitioned to SDEV_BLOCK.  Stop the
 	 * block layer from calling the midlayer with this device's
 	 * request queue.
 	 */
-	blk_mq_quiesce_queue_nowait(q);
-	return 0;
+	if (!ret)
+		scsi_stop_queue(sdev, true);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
 
@@ -2719,25 +2745,17 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait);
  */
 static int scsi_internal_device_block(struct scsi_device *sdev)
 {
-	struct request_queue *q = sdev->request_queue;
 	int err;
 
 	mutex_lock(&sdev->state_mutex);
-	err = scsi_internal_device_block_nowait(sdev);
+	err = __scsi_internal_device_block_nowait(sdev);
 	if (err == 0)
-		blk_mq_quiesce_queue(q);
+		scsi_stop_queue(sdev, false);
 	mutex_unlock(&sdev->state_mutex);
 
 	return err;
 }
 
-void scsi_start_queue(struct scsi_device *sdev)
-{
-	struct request_queue *q = sdev->request_queue;
-
-	blk_mq_unquiesce_queue(q);
-}
-
 /**
  * scsi_internal_device_unblock_nowait - resume a device after a block request
  * @sdev:	device to resume
```
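
The scsi_stop_queue()/scsi_start_queue() pair above keys both operations to transitions of sdev->queue_stopped, so quiesce and unquiesce always pair up no matter how the block/unblock paths nest. Here is a standalone userspace model of that invariant, with C11 atomics standing in for the kernel's cmpxchg(); every name in it is illustrative, not kernel API.

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int queue_stopped;	/* models sdev->queue_stopped */
static int quiesce_depth;		/* models the block layer's quiesce count */

static void quiesce(void)   { printf("quiesce   -> depth %d\n", ++quiesce_depth); }
static void unquiesce(void) { printf("unquiesce -> depth %d\n", --quiesce_depth); }

static void stop_queue(void)
{
	int expected = 0;

	/* Only the 0 -> 1 transition quiesces; nested stops are no-ops. */
	if (atomic_compare_exchange_strong(&queue_stopped, &expected, 1))
		quiesce();
}

static void start_queue(void)
{
	int expected = 1;

	/* Only the 1 -> 0 transition unquiesces; extra starts are no-ops. */
	if (atomic_compare_exchange_strong(&queue_stopped, &expected, 0))
		unquiesce();
}

int main(void)
{
	stop_queue();	/* quiesces */
	stop_queue();	/* nested stop: flag already set, no-op */
	start_queue();	/* unquiesces */
	start_queue();	/* already running: no-op */

	return quiesce_depth;	/* 0 on exit: the calls stayed balanced */
}
```

Since 5.16 counts nested quiesces (q->quiesce_depth), an unbalanced extra quiesce from the SCSI paths would have left the queue quiesced forever after a single unquiesce; gating both sides on the flag transition is what restores the pairing.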