Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/Kconfig       |  2
-rw-r--r--  drivers/nvme/target/admin-cmd.c   |  2
-rw-r--r--  drivers/nvme/target/configfs.c    |  5
-rw-r--r--  drivers/nvme/target/core.c        | 11
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c | 38
-rw-r--r--  drivers/nvme/target/loop.c        |  4
-rw-r--r--  drivers/nvme/target/nvmet.h       | 11
-rw-r--r--  drivers/nvme/target/passthru.c    | 14
-rw-r--r--  drivers/nvme/target/trace.c       | 18
-rw-r--r--  drivers/nvme/target/zns.c         |  5
10 files changed, 77 insertions, 33 deletions
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index 4be2ececbc45..973561c93888 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -31,7 +31,6 @@ config NVME_TARGET_PASSTHRU
config NVME_TARGET_LOOP
tristate "NVMe loopback device support"
depends on NVME_TARGET
- select NVME_CORE
select NVME_FABRICS
select SG_POOL
help
@@ -65,7 +64,6 @@ config NVME_TARGET_FC
config NVME_TARGET_FCLOOP
tristate "NVMe over Fabrics FC Transport Loopback Test driver"
depends on NVME_TARGET
- select NVME_CORE
select NVME_FABRICS
select SG_POOL
depends on NVME_FC
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 0cb98f2bbc8c..aa6d84d8848e 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1015,7 +1015,7 @@ u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
- if (nvmet_req_passthru_ctrl(req))
+ if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_admin_cmd(req);
switch (cmd->common.opcode) {
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 273555127188..d784f3c200b4 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1028,7 +1028,7 @@ nvmet_subsys_attr_version_store_locked(struct nvmet_subsys *subsys,
}
/* passthru subsystems use the underlying controller's version */
- if (nvmet_passthru_ctrl(subsys))
+ if (nvmet_is_passthru_subsys(subsys))
return -EINVAL;
ret = sscanf(page, "%d.%d.%d\n", &major, &minor, &tertiary);
@@ -1067,7 +1067,8 @@ static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item,
{
struct nvmet_subsys *subsys = to_subsys(item);
- return snprintf(page, PAGE_SIZE, "%s\n", subsys->serial);
+ return snprintf(page, PAGE_SIZE, "%*s\n",
+ NVMET_SN_MAX_SIZE, subsys->serial);
}
static ssize_t
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index ac7210a3ea1c..b8425fa34300 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -553,7 +553,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
mutex_lock(&subsys->lock);
ret = 0;
- if (nvmet_passthru_ctrl(subsys)) {
+ if (nvmet_is_passthru_subsys(subsys)) {
pr_info("cannot enable both passthru and regular namespaces for a single subsystem");
goto out_unlock;
}
@@ -802,6 +802,7 @@ void nvmet_sq_destroy(struct nvmet_sq *sq)
* controller teardown as a result of a keep-alive expiration.
*/
ctrl->reset_tbkas = true;
+ sq->ctrl->sqs[sq->qid] = NULL;
nvmet_ctrl_put(ctrl);
sq->ctrl = NULL; /* allows reusing the queue later */
}
@@ -868,7 +869,7 @@ static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
if (unlikely(ret))
return ret;
- if (nvmet_req_passthru_ctrl(req))
+ if (nvmet_is_passthru_req(req))
return nvmet_parse_passthru_io_cmd(req);
ret = nvmet_req_find_ns(req);
@@ -1205,6 +1206,9 @@ static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
ctrl->cap |= (15ULL << 24);
/* maximum queue entries supported: */
ctrl->cap |= NVMET_QUEUE_SIZE - 1;
+
+ if (nvmet_is_passthru_subsys(ctrl->subsys))
+ nvmet_passthrough_override_cap(ctrl);
}
struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
@@ -1362,8 +1366,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
goto out_put_subsystem;
mutex_init(&ctrl->lock);
- nvmet_init_cap(ctrl);
-
ctrl->port = req->port;
INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
@@ -1377,6 +1379,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
kref_init(&ctrl->ref);
ctrl->subsys = subsys;
+ nvmet_init_cap(ctrl);
WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
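
For reference, the CAP fields touched above follow the NVMe Controller Capabilities layout: MQES lives in bits 15:0 (0's-based), TO in bits 31:24 (500 ms units, so the 15 written here means 7.5 s), and CSS in bits 44:37. The sketch below only illustrates that bit layout, it is not the kernel's helpers; with the NVMET_QUEUE_SIZE of 1024 the target ends up advertising MQES = 1023. Moving nvmet_init_cap() after ctrl->subsys is assigned is what lets nvmet_passthrough_override_cap() inspect the underlying controller (see the passthru.c hunk further down).

/* Illustrative CAP bit layout (NVMe Controller Capabilities register). */
#include <stdint.h>

#define CAP_MQES(cap)  ((uint16_t)((cap) & 0xffff))       /* bits 15:0, 0's-based queue size */
#define CAP_TO(cap)    ((uint8_t)(((cap) >> 24) & 0xff))  /* bits 31:24, in 500 ms units */
#define CAP_CSS(cap)   ((uint8_t)(((cap) >> 37) & 0xff))  /* bits 44:37, supported command sets */

static uint64_t build_cap(uint16_t mqes, uint8_t timeout_500ms)
{
	uint64_t cap = 0;

	cap |= (uint64_t)timeout_500ms << 24;  /* 15 -> 7.5 s, as in nvmet_init_cap() */
	cap |= mqes;                           /* e.g. NVMET_QUEUE_SIZE - 1 = 1023, 0's-based */
	return cap;
}
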
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 7d0f3523fdab..7d0454cee920 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -109,21 +109,38 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
u16 qid = le16_to_cpu(c->qid);
u16 sqsize = le16_to_cpu(c->sqsize);
struct nvmet_ctrl *old;
+ u16 mqes = NVME_CAP_MQES(ctrl->cap);
u16 ret;
- old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
- if (old) {
- pr_warn("queue already connected!\n");
- req->error_loc = offsetof(struct nvmf_connect_command, opcode);
- return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
- }
if (!sqsize) {
pr_warn("queue size zero!\n");
req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
goto err;
}
+ if (ctrl->sqs[qid] != NULL) {
+ pr_warn("qid %u has already been created\n", qid);
+ req->error_loc = offsetof(struct nvmf_connect_command, qid);
+ return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
+ }
+
+ if (sqsize > mqes) {
+ pr_warn("sqsize %u is larger than MQES supported %u cntlid %d\n",
+ sqsize, mqes, ctrl->cntlid);
+ req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
+ return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
+ }
+
+ old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
+ if (old) {
+ pr_warn("queue already connected!\n");
+ req->error_loc = offsetof(struct nvmf_connect_command, opcode);
+ return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
+ }
+
/* note: convert queue size from 0's-based value to 1's-based value */
nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
@@ -138,6 +155,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
if (ret) {
pr_err("failed to install queue %d cntlid %d ret %x\n",
qid, ctrl->cntlid, ret);
+ ctrl->sqs[qid] = NULL;
goto err;
}
}
@@ -260,11 +278,11 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
}
status = nvmet_install_queue(ctrl, req);
- if (status) {
- /* pass back cntlid that had the issue of installing queue */
- req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
+ if (status)
goto out_ctrl_put;
- }
+
+ /* pass back cntlid for successful completion */
+ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
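
Both CAP.MQES and the connect command's SQSIZE are 0's-based ("entries minus one"), which is why the new check can compare them directly and why sqsize + 1 is passed to nvmet_cq_setup()/nvmet_sq_setup(). A minimal sketch of that comparison, assuming the NVME_CAP_MQES() definition in include/linux/nvme.h (bits 15:0 of CAP); a host that asks for more entries than the target advertises now gets NVME_SC_CONNECT_INVALID_PARAM with the offending attribute reported via IPO_IATTR_CONNECT_SQE().

#include <stdbool.h>
#include <stdint.h>

/* Both values are 0's-based: a value of N means N + 1 queue entries. */
static bool sqsize_within_mqes(uint64_t cap, uint16_t sqsize)
{
	uint16_t mqes = (uint16_t)(cap & 0xffff);  /* CAP.MQES */

	return sqsize <= mqes;
}
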
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 3a17a7e26bbf..0285ccc7541f 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -107,10 +107,10 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
} else {
struct request *rq;
- rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
+ rq = nvme_find_rq(nvme_loop_tagset(queue), cqe->command_id);
if (!rq) {
dev_err(queue->ctrl->ctrl.device,
- "tag 0x%x on queue %d not found\n",
+ "got bad command_id %#x on queue %d\n",
cqe->command_id, nvme_loop_queue_idx(queue));
return;
}
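
nvme_find_rq() differs from blk_mq_tag_to_rq() in that, as far as the host-side helpers in drivers/nvme/host/nvme.h go, it treats part of the 16-bit command_id as a generation counter and only resolves to a request when that counter matches, so a stale or corrupted completion is reported (the new "got bad command_id" message) rather than being attributed to whatever request currently owns the tag. A rough sketch of the encoding idea follows; the bit split and names here are assumptions for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>

/* Assumed split: low 12 bits carry the blk-mq tag, high 4 bits a generation. */
#define CID_GEN_BITS  4
#define CID_TAG_BITS  (16 - CID_GEN_BITS)
#define CID_TAG_MASK  ((1u << CID_TAG_BITS) - 1)

static inline uint16_t cid_tag(uint16_t cid) { return cid & CID_TAG_MASK; }
static inline uint16_t cid_gen(uint16_t cid) { return cid >> CID_TAG_BITS; }

/* Accept a completion only if its generation matches the live request's. */
static bool completion_is_current(uint16_t cid, uint16_t live_gen)
{
	return cid_gen(cid) == live_gen;
}
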
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 06dd3d537f07..7143c7fa7464 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -582,7 +582,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys);
void nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys);
u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req);
u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req);
-static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
return subsys->passthru_ctrl;
}
@@ -601,18 +601,19 @@ static inline u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
{
return 0;
}
-static inline struct nvme_ctrl *nvmet_passthru_ctrl(struct nvmet_subsys *subsys)
+static inline bool nvmet_is_passthru_subsys(struct nvmet_subsys *subsys)
{
return NULL;
}
#endif /* CONFIG_NVME_TARGET_PASSTHRU */
-static inline struct nvme_ctrl *
-nvmet_req_passthru_ctrl(struct nvmet_req *req)
+static inline bool nvmet_is_passthru_req(struct nvmet_req *req)
{
- return nvmet_passthru_ctrl(nvmet_req_subsys(req));
+ return nvmet_is_passthru_subsys(nvmet_req_subsys(req));
}
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl);
+
u16 errno_to_nvme_status(struct nvmet_req *req, int errno);
u16 nvmet_report_invalid_opcode(struct nvmet_req *req);
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 225cd1ffbe45..f0efb3537989 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -20,6 +20,16 @@ MODULE_IMPORT_NS(NVME_TARGET_PASSTHRU);
*/
static DEFINE_XARRAY(passthru_subsystems);
+void nvmet_passthrough_override_cap(struct nvmet_ctrl *ctrl)
+{
+ /*
+ * Multiple command set support can only be declared if the underlying
+ * controller actually supports it.
+ */
+ if (!nvme_multi_css(ctrl->subsys->passthru_ctrl))
+ ctrl->cap &= ~(1ULL << 43);
+}
+
static u16 nvmet_passthru_override_id_ctrl(struct nvmet_req *req)
{
struct nvmet_ctrl *ctrl = req->sq->ctrl;
@@ -218,7 +228,7 @@ static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
{
- struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct request_queue *q = ctrl->admin_q;
struct nvme_ns *ns = NULL;
struct request *rq = NULL;
@@ -299,7 +309,7 @@ out:
*/
static void nvmet_passthru_set_host_behaviour(struct nvmet_req *req)
{
- struct nvme_ctrl *ctrl = nvmet_req_passthru_ctrl(req);
+ struct nvme_ctrl *ctrl = nvmet_req_subsys(req)->passthru_ctrl;
struct nvme_feat_host_behavior *host;
u16 status = NVME_SC_INTERNAL;
int ret;
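
The 1ULL << 43 cleared in nvmet_passthrough_override_cap() sits inside CAP.CSS (bits 44:37): bit 43 is CSS bit 6, the flag advertising that I/O command sets beyond NVM can be selected (NVME_CAP_CSS_CSI in include/linux/nvme.h, if I recall the name correctly). Since nvme_multi_css() reports whether the underlying controller actually runs with multiple command sets enabled, the passthru target strips the bit when it does not. A minimal sketch of the bit arithmetic, for illustration only:

#include <stdbool.h>
#include <stdint.h>

#define CAP_CSS_SHIFT  37
#define CSS_CSI        (1u << 6)  /* "I/O command sets selectable" CSS bit */

static bool cap_advertises_multi_css(uint64_t cap)
{
	return ((cap >> CAP_CSS_SHIFT) & 0xff) & CSS_CSI;
}

static uint64_t cap_clear_multi_css(uint64_t cap)
{
	return cap & ~((uint64_t)CSS_CSI << CAP_CSS_SHIFT);  /* same as ~(1ULL << 43) */
}
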
diff --git a/drivers/nvme/target/trace.c b/drivers/nvme/target/trace.c
index 1373a3c67962..bff454d46255 100644
--- a/drivers/nvme/target/trace.c
+++ b/drivers/nvme/target/trace.c
@@ -27,7 +27,7 @@ static const char *nvmet_trace_admin_get_features(struct trace_seq *p,
u8 sel = cdw10[1] & 0x7;
u32 cdw11 = get_unaligned_le32(cdw10 + 4);
- trace_seq_printf(p, "fid=0x%x sel=0x%x cdw11=0x%x", fid, sel, cdw11);
+ trace_seq_printf(p, "fid=0x%x, sel=0x%x, cdw11=0x%x", fid, sel, cdw11);
trace_seq_putc(p, 0);
return ret;
@@ -49,6 +49,20 @@ static const char *nvmet_trace_get_lba_status(struct trace_seq *p,
return ret;
}
+static const char *nvmet_trace_admin_set_features(struct trace_seq *p,
+ u8 *cdw10)
+{
+ const char *ret = trace_seq_buffer_ptr(p);
+ u8 fid = cdw10[0];
+ u8 sv = cdw10[3] & 0x8;
+ u32 cdw11 = get_unaligned_le32(cdw10 + 4);
+
+ trace_seq_printf(p, "fid=0x%x, sv=0x%x, cdw11=0x%x", fid, sv, cdw11);
+ trace_seq_putc(p, 0);
+
+ return ret;
+}
+
static const char *nvmet_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
const char *ret = trace_seq_buffer_ptr(p);
@@ -94,6 +108,8 @@ const char *nvmet_trace_parse_admin_cmd(struct trace_seq *p,
switch (opcode) {
case nvme_admin_identify:
return nvmet_trace_admin_identify(p, cdw10);
+ case nvme_admin_set_features:
+ return nvmet_trace_admin_set_features(p, cdw10);
case nvme_admin_get_features:
return nvmet_trace_admin_get_features(p, cdw10);
case nvme_admin_get_lba_status:
diff --git a/drivers/nvme/target/zns.c b/drivers/nvme/target/zns.c
index 17f8b7a45f21..46bc30fe85d2 100644
--- a/drivers/nvme/target/zns.c
+++ b/drivers/nvme/target/zns.c
@@ -115,14 +115,11 @@ void nvmet_execute_identify_cns_cs_ns(struct nvmet_req *req)
}
status = nvmet_req_find_ns(req);
- if (status) {
- status = NVME_SC_INTERNAL;
+ if (status)
goto done;
- }
if (!bdev_is_zoned(req->ns->bdev)) {
req->error_loc = offsetof(struct nvme_identify, nsid);
- status = NVME_SC_INVALID_NS | NVME_SC_DNR;
goto done;
}