author:    Christoph Hellwig <hch@lst.de>  2022-09-20 17:21:17 +0200
committer: Christoph Hellwig <hch@lst.de>  2022-09-27 14:44:17 +0200
commit:    ceee1953f9239b699d80c5ef366712c3fd865cc2 (patch)
tree:      e16883e20453fd57b4b4f155d5d2ceadb692da8a /drivers/nvme
parent:    nvme-loop: store the generic nvme_ctrl in set->driver_data (diff)
download:  linux-ceee1953f9239b699d80c5ef366712c3fd865cc2.tar.xz
           linux-ceee1953f9239b699d80c5ef366712c3fd865cc2.zip
nvme-loop: use the tagset alloc/free helpers
Use the common helpers to allocate and free the tagsets. To make this work
the generic nvme_ctrl now needs to be stored in the hctx private data
instead of the nvme_loop_ctrl.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/target/loop.c | 83
1 file changed, 19 insertions(+), 64 deletions(-)
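
For readers unfamiliar with the common helpers this patch switches to, the sketch below approximates what nvme_alloc_admin_tag_set() takes over; it is pieced together from the open-coded sequence the diff below deletes, not copied from drivers/nvme/host/core.c, so the exact field order and error handling are assumptions. Teardown via nvme_remove_admin_tag_set() would then undo the same three steps (queues, then tag set) in reverse.

/*
 * Illustrative sketch only: an approximation of what the common
 * nvme_alloc_admin_tag_set() helper consolidates, reconstructed from
 * the open-coded setup this patch removes.  The real helper lives in
 * drivers/nvme/host/core.c and may differ in detail.
 */
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
		const struct blk_mq_ops *ops, unsigned int flags,
		unsigned int cmd_size)
{
	int error;

	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	set->reserved_tags = NVMF_RESERVED_TAGS;
	set->numa_node = ctrl->numa_node;
	set->cmd_size = cmd_size;
	set->driver_data = ctrl;	/* the generic nvme_ctrl, not the driver's ctrl */
	set->nr_hw_queues = 1;
	set->timeout = NVME_ADMIN_TIMEOUT;
	set->flags = flags;

	error = blk_mq_alloc_tag_set(set);
	if (error)
		return error;
	ctrl->admin_tagset = set;

	/* fabrics connect queue and admin queue share the admin tag set */
	ctrl->fabrics_q = blk_mq_init_queue(set);
	if (IS_ERR(ctrl->fabrics_q)) {
		error = PTR_ERR(ctrl->fabrics_q);
		goto out_free_tagset;
	}
	ctrl->admin_q = blk_mq_init_queue(set);
	if (IS_ERR(ctrl->admin_q)) {
		error = PTR_ERR(ctrl->admin_q);
		goto out_cleanup_fabrics_q;
	}
	return 0;

out_cleanup_fabrics_q:
	blk_mq_destroy_queue(ctrl->fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(set);
	return error;
}
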
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 54578cc18d52..b45fe3adf015 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -266,9 +266,7 @@ static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
return;
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
}
static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
@@ -282,10 +280,8 @@ static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
list_del(&ctrl->list);
mutex_unlock(&nvme_loop_ctrl_mutex);
- if (nctrl->tagset) {
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
- blk_mq_free_tag_set(&ctrl->tag_set);
- }
+ if (nctrl->tagset)
+ nvme_remove_io_tag_set(nctrl);
kfree(ctrl->queues);
nvmf_free_options(nctrl->opts);
free_ctrl:
@@ -350,52 +346,31 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
int error;
- memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
- ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
- ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
- ctrl->admin_tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->admin_tag_set.driver_data = &ctrl->ctrl;
- ctrl->admin_tag_set.nr_hw_queues = 1;
- ctrl->admin_tag_set.timeout = NVME_ADMIN_TIMEOUT;
- ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
-
ctrl->queues[0].ctrl = ctrl;
error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
if (error)
return error;
ctrl->ctrl.queue_count = 1;
- error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
+ error = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
+ &nvme_loop_admin_mq_ops, BLK_MQ_F_NO_SCHED,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (error)
goto out_free_sq;
- ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
-
- ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.fabrics_q)) {
- error = PTR_ERR(ctrl->ctrl.fabrics_q);
- goto out_free_tagset;
- }
- ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
- if (IS_ERR(ctrl->ctrl.admin_q)) {
- error = PTR_ERR(ctrl->ctrl.admin_q);
- goto out_cleanup_fabrics_q;
- }
/* reset stopped state for the fresh admin queue */
clear_bit(NVME_CTRL_ADMIN_Q_STOPPED, &ctrl->ctrl.flags);
error = nvmf_connect_admin_queue(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
error = nvme_enable_ctrl(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
ctrl->ctrl.max_hw_sectors =
(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);
@@ -404,17 +379,13 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
error = nvme_init_ctrl_finish(&ctrl->ctrl);
if (error)
- goto out_cleanup_queue;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_queue:
+out_cleanup_tagset:
clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
- blk_mq_destroy_queue(ctrl->ctrl.admin_q);
-out_cleanup_fabrics_q:
- blk_mq_destroy_queue(ctrl->ctrl.fabrics_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->admin_tag_set);
+ nvme_remove_admin_tag_set(&ctrl->ctrl);
out_free_sq:
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
return error;
@@ -522,37 +493,21 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
if (ret)
return ret;
- memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
- ctrl->tag_set.ops = &nvme_loop_mq_ops;
- ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
- ctrl->tag_set.reserved_tags = NVMF_RESERVED_TAGS;
- ctrl->tag_set.numa_node = ctrl->ctrl.numa_node;
- ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
- ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
- NVME_INLINE_SG_CNT * sizeof(struct scatterlist);
- ctrl->tag_set.driver_data = &ctrl->ctrl;
- ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
- ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
- ctrl->ctrl.tagset = &ctrl->tag_set;
-
- ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
+ ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
+ &nvme_loop_mq_ops, BLK_MQ_F_SHOULD_MERGE,
+ sizeof(struct nvme_loop_iod) +
+ NVME_INLINE_SG_CNT * sizeof(struct scatterlist));
if (ret)
goto out_destroy_queues;
- ret = nvme_ctrl_init_connect_q(&(ctrl->ctrl));
- if (ret)
- goto out_free_tagset;
-
ret = nvme_loop_connect_io_queues(ctrl);
if (ret)
- goto out_cleanup_connect_q;
+ goto out_cleanup_tagset;
return 0;
-out_cleanup_connect_q:
- blk_mq_destroy_queue(ctrl->ctrl.connect_q);
-out_free_tagset:
- blk_mq_free_tag_set(&ctrl->tag_set);
+out_cleanup_tagset:
+ nvme_remove_io_tag_set(&ctrl->ctrl);
out_destroy_queues:
nvme_loop_destroy_io_queues(ctrl);
return ret;
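
The other half of the change is the "hctx private data" point from the commit message: with set->driver_data now holding the generic struct nvme_ctrl (set up by the parent commit), the loop driver has to get back to its own nvme_loop_ctrl via container_of() instead of reading it directly. A minimal sketch of that pattern follows; the to_loop_ctrl() name matches the container_of helper pattern used in drivers/nvme/target/loop.c, but the surrounding init_hctx details are illustrative only.

/*
 * Sketch: recovering the driver-private controller from the generic
 * nvme_ctrl stored in the tag set's driver_data.  Illustrative, not
 * the exact loop.c code.
 */
static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	/* nvme_loop_ctrl embeds the generic controller as ->ctrl */
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	/* data is the generic nvme_ctrl placed in set->driver_data ... */
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(data);

	/* ... from which the per-queue private data is still reachable */
	hctx->driver_data = &ctrl->queues[hctx_idx + 1];
	return 0;
}
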