author:    Ming Lei <ming.lei@redhat.com>  2023-10-09 11:33:20 +0200
committer: Jens Axboe <axboe@kernel.dk>  2023-10-17 16:27:56 +0200
commit:    bd23f6c2c2d00518e2f27f2d25cef795de9bee56
tree:      9aaf304876a5a6e841338714719061ef5c8e4d93 /drivers/block
parent:    ublk: rename mm_lock as lock
ublk: quiesce request queue when aborting queue
So far, aborting a queue ends its requests while the ubq daemon is exiting, and this can run concurrently with ublk_queue_rq(). That approach is fragile, and we depend on the tricky usage of UBLK_IO_FLAG_ABORTED to avoid the race.

Quiesce the request queue while aborting it, so the two code paths run completely exclusively of each other. This also makes it easier to add new ublk features, such as relaxing the single-task-per-queue limit.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Link: https://lore.kernel.org/r/20231009093324.957829-6-ming.lei@redhat.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
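For context, the exclusion relies on a standard blk-mq guarantee: ->queue_rq() is invoked inside an RCU/SRCU read-side section, and blk_mq_quiesce_queue() returns only once every in-flight ->queue_rq() call has finished, blocking new invocations until blk_mq_unquiesce_queue(). A minimal sketch of the pattern (illustrative only; abort_dead_requests() is a hypothetical helper, not code from this patch):

    #include <linux/blk-mq.h>

    /* Hypothetical helper: fail leftover requests without racing ->queue_rq() */
    static void abort_dead_requests(struct request_queue *q)
    {
            blk_mq_quiesce_queue(q);        /* waits out any running ->queue_rq() */
            /* ... end/abort requests the dying daemon left behind ... */
            blk_mq_unquiesce_queue(q);      /* normal dispatch resumes */
    }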
Diffstat (limited to 'drivers/block')
 drivers/block/ublk_drv.c | 59 ++++++++++++++++++++++++++++++++++++---------
 1 file changed, 50 insertions(+), 9 deletions(-)
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 01cd2be1df0e..f80e48ab7ba0 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -1441,25 +1441,59 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
         }
 }
 
-static void ublk_daemon_monitor_work(struct work_struct *work)
+static bool ublk_abort_requests(struct ublk_device *ub)
 {
-        struct ublk_device *ub =
-                container_of(work, struct ublk_device, monitor_work.work);
+        struct gendisk *disk;
         int i;
 
+        spin_lock(&ub->lock);
+        disk = ub->ub_disk;
+        if (disk)
+                get_device(disk_to_dev(disk));
+        spin_unlock(&ub->lock);
+
+        /* Our disk has been dead */
+        if (!disk)
+                return false;
+
+        /* Now we are serialized with ublk_queue_rq() */
+        blk_mq_quiesce_queue(disk->queue);
         for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
                 struct ublk_queue *ubq = ublk_get_queue(ub, i);
 
                 if (ubq_daemon_is_dying(ubq)) {
-                        if (ublk_queue_can_use_recovery(ubq))
-                                schedule_work(&ub->quiesce_work);
-                        else
-                                schedule_work(&ub->stop_work);
-
                         /* abort queue is for making forward progress */
                         ublk_abort_queue(ub, ubq);
                 }
         }
+        blk_mq_unquiesce_queue(disk->queue);
+        put_device(disk_to_dev(disk));
+
+        return true;
+}
+
+static void ublk_daemon_monitor_work(struct work_struct *work)
+{
+        struct ublk_device *ub =
+                container_of(work, struct ublk_device, monitor_work.work);
+        int i;
+
+        for (i = 0; i < ub->dev_info.nr_hw_queues; i++) {
+                struct ublk_queue *ubq = ublk_get_queue(ub, i);
+
+                if (ubq_daemon_is_dying(ubq))
+                        goto found;
+        }
+        return;
+
+found:
+        if (!ublk_abort_requests(ub))
+                return;
+
+        if (ublk_can_use_recovery(ub))
+                schedule_work(&ub->quiesce_work);
+        else
+                schedule_work(&ub->stop_work);
 
         /*
          * We can't schedule monitor work after ub's state is not UBLK_S_DEV_LIVE.
@@ -1594,6 +1628,8 @@ static void ublk_unquiesce_dev(struct ublk_device *ub)
 
 static void ublk_stop_dev(struct ublk_device *ub)
 {
+        struct gendisk *disk;
+
         mutex_lock(&ub->mutex);
         if (ub->dev_info.state == UBLK_S_DEV_DEAD)
                 goto unlock;
@@ -1603,10 +1639,15 @@ static void ublk_stop_dev(struct ublk_device *ub)
                 ublk_unquiesce_dev(ub);
         }
         del_gendisk(ub->ub_disk);
+
+        /* Sync with ublk_abort_queue() by holding the lock */
+        spin_lock(&ub->lock);
+        disk = ub->ub_disk;
         ub->dev_info.state = UBLK_S_DEV_DEAD;
         ub->dev_info.ublksrv_pid = -1;
-        put_disk(ub->ub_disk);
         ub->ub_disk = NULL;
+        spin_unlock(&ub->lock);
+        put_disk(disk);
 unlock:
         mutex_unlock(&ub->mutex);
         ublk_cancel_dev(ub);
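The teardown side pairs with this through a small lookup-and-pin idiom: ublk_abort_requests() takes a reference on the gendisk under ub->lock, while ublk_stop_dev() clears ub->ub_disk under the same lock before dropping its reference, so a concurrent reader either sees NULL or holds a disk that cannot go away underneath it. Reduced to its essentials (a sketch; the pin_disk()/unpublish_disk() names are illustrative, not from the driver):

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    /* Reader: returns NULL, or a disk pinned by an extra device reference. */
    static struct gendisk *pin_disk(spinlock_t *lock, struct gendisk **slot)
    {
            struct gendisk *disk;

            spin_lock(lock);
            disk = *slot;
            if (disk)
                    get_device(disk_to_dev(disk));
            spin_unlock(lock);
            return disk;
    }

    /* Teardown: unpublish under the lock, then drop the original reference. */
    static void unpublish_disk(spinlock_t *lock, struct gendisk **slot)
    {
            struct gendisk *disk;

            spin_lock(lock);
            disk = *slot;
            *slot = NULL;
            spin_unlock(lock);
            put_disk(disk);         /* put_disk() tolerates NULL */
    }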