author     Nitzan Carmi <nitzanc@mellanox.com>  2018-03-20 12:07:30 +0100
committer  Jens Axboe <axboe@kernel.dk>         2018-03-26 16:53:43 +0200
commit     b435ecea2a4d0b5cd5be2c5497c3461435f3f3a7 (patch)
tree       80a22ddb03658c3b9f7c64d82f254ede50929b1b /drivers/nvme
parent     nvme-rdma: Allow DELETING state change failure in error_recovery (diff)
nvme: Add .stop_ctrl to nvme ctrl ops
For consistency reasons, any fabric-specific work (e.g. error recovery/reconnect) should be canceled in nvme_stop_ctrl, just like all other pending NVMe work (e.g. scan, keep-alive). The patch aims to simplify the logic of the code: until now we relied only on a vague requirement that each fabric flush its private workqueues at the beginning of its .delete_ctrl op.

Signed-off-by: Nitzan Carmi <nitzanc@mellanox.com>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
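For illustration only (not part of this patch): a fabric transport that keeps private error-recovery and reconnect work items would hook the new callback roughly as sketched below. The nvme_foo_* names and the to_foo_ctrl() helper are hypothetical and simply mirror what the rdma.c hunk in this patch does for real.

/* Hypothetical sketch: cancel fabric-private work from .stop_ctrl so that
 * nvme_stop_ctrl() quiesces it together with scan/keep-alive/async-event work.
 */
static void nvme_foo_stop_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_foo_ctrl *ctrl = to_foo_ctrl(nctrl);	/* hypothetical container_of() wrapper */

	/* Safe to call even if the work was never queued. */
	cancel_work_sync(&ctrl->err_work);
	cancel_delayed_work_sync(&ctrl->reconnect_work);
}

static const struct nvme_ctrl_ops nvme_foo_ctrl_ops = {
	/* ... other ops ... */
	.stop_ctrl	= nvme_foo_stop_ctrl,
};

Since nvme_stop_ctrl() checks the op for NULL, transports without private work items can simply leave .stop_ctrl unset.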
Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/core.c    2
-rw-r--r--   drivers/nvme/host/nvme.h    1
-rw-r--r--   drivers/nvme/host/rdma.c   12
3 files changed, 12 insertions, 3 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 7811b4886e63..ad99dd76dcd2 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -3370,6 +3370,8 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
 	flush_work(&ctrl->async_event_work);
 	flush_work(&ctrl->scan_work);
 	cancel_work_sync(&ctrl->fw_act_work);
+	if (ctrl->ops->stop_ctrl)
+		ctrl->ops->stop_ctrl(ctrl);
 }
 EXPORT_SYMBOL_GPL(nvme_stop_ctrl);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 29942b1892f7..741e3c79bbe9 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -313,6 +313,7 @@ struct nvme_ctrl_ops {
 	void (*delete_ctrl)(struct nvme_ctrl *ctrl);
 	int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
 	int (*reinit_request)(void *data, struct request *rq);
+	void (*stop_ctrl)(struct nvme_ctrl *ctrl);
 };
 
 #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 5e731f2c329c..758537e9ba07 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -867,6 +867,14 @@ out_free_io_queues:
 	return ret;
 }
 
+static void nvme_rdma_stop_ctrl(struct nvme_ctrl *nctrl)
+{
+	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
+
+	cancel_work_sync(&ctrl->err_work);
+	cancel_delayed_work_sync(&ctrl->reconnect_work);
+}
+
 static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
 {
 	struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl);
@@ -1718,9 +1726,6 @@ static const struct blk_mq_ops nvme_rdma_admin_mq_ops = {
 
 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
 {
-	cancel_work_sync(&ctrl->err_work);
-	cancel_delayed_work_sync(&ctrl->reconnect_work);
-
 	if (ctrl->ctrl.queue_count > 1) {
 		nvme_stop_queues(&ctrl->ctrl);
 		blk_mq_tagset_busy_iter(&ctrl->tag_set,
@@ -1798,6 +1803,7 @@ static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {
 	.submit_async_event	= nvme_rdma_submit_async_event,
 	.delete_ctrl		= nvme_rdma_delete_ctrl,
 	.get_address		= nvmf_get_address,
+	.stop_ctrl		= nvme_rdma_stop_ctrl,
 };
 
 static inline bool