Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--  drivers/nvme/target/Kconfig       |  1
-rw-r--r--  drivers/nvme/target/admin-cmd.c   |  5
-rw-r--r--  drivers/nvme/target/configfs.c    |  4
-rw-r--r--  drivers/nvme/target/core.c        | 38
-rw-r--r--  drivers/nvme/target/discovery.c   | 77
-rw-r--r--  drivers/nvme/target/fabrics-cmd.c | 16
-rw-r--r--  drivers/nvme/target/fc.c          |  9
-rw-r--r--  drivers/nvme/target/io-cmd-bdev.c |  6
-rw-r--r--  drivers/nvme/target/io-cmd-file.c |  7
-rw-r--r--  drivers/nvme/target/loop.c        | 22
-rw-r--r--  drivers/nvme/target/nvmet.h       |  5
-rw-r--r--  drivers/nvme/target/rdma.c        | 21
-rw-r--r--  drivers/nvme/target/tcp.c         | 38
13 files changed, 141 insertions(+), 108 deletions(-)
diff --git a/drivers/nvme/target/Kconfig b/drivers/nvme/target/Kconfig
index d94f25cde019..3ef0a4e5eed6 100644
--- a/drivers/nvme/target/Kconfig
+++ b/drivers/nvme/target/Kconfig
@@ -3,6 +3,7 @@ config NVME_TARGET
tristate "NVMe Target support"
depends on BLOCK
depends on CONFIGFS_FS
+ select SGL_ALLOC
help
This enabled target side support for the NVMe protocol, that is
it allows the Linux kernel to implement NVMe subsystems and
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 76250181fee0..9f72d515fc4b 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -24,6 +24,11 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len;
}
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
+{
+ return le64_to_cpu(cmd->get_log_page.lpo);
+}
+
static void nvmet_execute_get_log_page_noop(struct nvmet_req *req)
{
nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len));
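The new nvmet_get_log_page_offset() mirrors nvmet_get_log_page_len(): it decodes the Log Page Offset (LPO) field of a Get Log Page SQE. A minimal sketch of how a log handler might combine the two (hypothetical handler, not part of this patch):

    /* Sketch, assuming a hypothetical handler: read the requested window
     * (offset, length) of a log page; the spec requires LPO to be dword
     * aligned. */
    static void example_read_log_window(struct nvmet_req *req)
    {
            u64 offset = nvmet_get_log_page_offset(req->cmd); /* le64 LPO */
            u32 len = nvmet_get_log_page_len(req->cmd);       /* (NUMD + 1) * 4 bytes */

            if (offset & 0x3) {
                    nvmet_req_complete(req, NVME_SC_INVALID_FIELD | NVME_SC_DNR);
                    return;
            }
            /* build the full log, then copy 'len' bytes starting at 'offset' */
    }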
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index adb79545cdd7..08dd5af357f7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -898,8 +898,8 @@ static struct config_group *nvmet_subsys_make(struct config_group *group,
}
subsys = nvmet_subsys_alloc(name, NVME_NQN_NVME);
- if (!subsys)
- return ERR_PTR(-ENOMEM);
+ if (IS_ERR(subsys))
+ return ERR_CAST(subsys);
config_group_init_type_name(&subsys->group, name, &nvmet_subsys_type);
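For context, nvmet_subsys_alloc() now reports failures via error pointers rather than NULL, so the configfs caller switches to IS_ERR()/ERR_CAST(). A minimal sketch of that kernel idiom, using hypothetical types and names:

    #include <linux/err.h>
    #include <linux/slab.h>

    struct foo { int x; };

    /* Hypothetical allocator: encodes the errno in the returned pointer. */
    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (!f)
                    return ERR_PTR(-ENOMEM);
            return f;
    }

    /* Hypothetical caller returning a different pointer type, as the
     * configfs code does with config_group: ERR_CAST() preserves the
     * errno while changing the pointer type. */
    static void *foo_make(void)
    {
            struct foo *f = foo_alloc();

            if (IS_ERR(f))
                    return ERR_CAST(f);
            return f;
    }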
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b3e765a95af8..7734a6acff85 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -8,6 +8,7 @@
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/pci-p2pdma.h>
+#include <linux/scatterlist.h>
#include "nvmet.h"
@@ -214,6 +215,8 @@ void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
{
struct nvmet_ctrl *ctrl;
+ lockdep_assert_held(&subsys->lock);
+
list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
@@ -494,13 +497,14 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
int ret;
mutex_lock(&subsys->lock);
- ret = -EMFILE;
- if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
- goto out_unlock;
ret = 0;
if (ns->enabled)
goto out_unlock;
+ ret = -EMFILE;
+ if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
+ goto out_unlock;
+
ret = nvmet_bdev_ns_enable(ns);
if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
@@ -644,7 +648,7 @@ static void nvmet_update_sq_head(struct nvmet_req *req)
} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
old_sqhd);
}
- req->rsp->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
+ req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
}
static void nvmet_set_error(struct nvmet_req *req, u16 status)
@@ -653,7 +657,7 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
struct nvme_error_slot *new_error_slot;
unsigned long flags;
- req->rsp->status = cpu_to_le16(status << 1);
+ req->cqe->status = cpu_to_le16(status << 1);
if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
return;
@@ -673,15 +677,15 @@ static void nvmet_set_error(struct nvmet_req *req, u16 status)
spin_unlock_irqrestore(&ctrl->error_lock, flags);
/* set the more bit for this request */
- req->rsp->status |= cpu_to_le16(1 << 14);
+ req->cqe->status |= cpu_to_le16(1 << 14);
}
static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
{
if (!req->sq->sqhd_disabled)
nvmet_update_sq_head(req);
- req->rsp->sq_id = cpu_to_le16(req->sq->qid);
- req->rsp->command_id = req->cmd->common.command_id;
+ req->cqe->sq_id = cpu_to_le16(req->sq->qid);
+ req->cqe->command_id = req->cmd->common.command_id;
if (unlikely(status))
nvmet_set_error(req, status);
@@ -838,8 +842,8 @@ bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
req->sg = NULL;
req->sg_cnt = 0;
req->transfer_len = 0;
- req->rsp->status = 0;
- req->rsp->sq_head = 0;
+ req->cqe->status = 0;
+ req->cqe->sq_head = 0;
req->ns = NULL;
req->error_loc = NVMET_NO_ERROR_LOC;
req->error_slba = 0;
@@ -1066,7 +1070,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
return NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
}
@@ -1087,7 +1091,7 @@ u16 nvmet_ctrl_find_get(const char *subsysnqn, const char *hostnqn, u16 cntlid,
pr_warn("could not find controller %d for subsys %s / host %s\n",
cntlid, subsysnqn, hostnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
out:
@@ -1185,7 +1189,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!subsys) {
pr_warn("connect request for invalid subsystem %s!\n",
subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
goto out;
}
@@ -1194,7 +1198,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
if (!nvmet_host_allowed(subsys, hostnqn)) {
pr_info("connect by host %s for subsystem %s not allowed\n",
hostnqn, subsysnqn);
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
up_read(&nvmet_config_sem);
status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
goto out_put_subsystem;
@@ -1364,7 +1368,7 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
if (!subsys)
- return NULL;
+ return ERR_PTR(-ENOMEM);
subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */
/* generate a random serial number as our controllers are ephemeral: */
@@ -1380,14 +1384,14 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
default:
pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
kfree(subsys);
- return NULL;
+ return ERR_PTR(-EINVAL);
}
subsys->type = type;
subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
GFP_KERNEL);
if (!subsys->subsysnqn) {
kfree(subsys);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
kref_init(&subsys->ref);
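Among the core.c changes, nvmet_ns_changed() now states its locking contract explicitly with lockdep_assert_held(). A minimal sketch of that pattern, with hypothetical names:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_lock);

    /* Hypothetical helper that must only run with example_lock held;
     * with CONFIG_PROVE_LOCKING an unlocked call triggers a warning. */
    static void example_update(void)
    {
            lockdep_assert_held(&example_lock);
            /* ... modify state protected by example_lock ... */
    }

    static void example_caller(void)
    {
            mutex_lock(&example_lock);
            example_update();
            mutex_unlock(&example_lock);
    }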
diff --git a/drivers/nvme/target/discovery.c b/drivers/nvme/target/discovery.c
index c872b47a88f3..5baf269f3f8a 100644
--- a/drivers/nvme/target/discovery.c
+++ b/drivers/nvme/target/discovery.c
@@ -30,14 +30,17 @@ void nvmet_port_disc_changed(struct nvmet_port *port,
{
struct nvmet_ctrl *ctrl;
+ lockdep_assert_held(&nvmet_config_sem);
nvmet_genctr++;
+ mutex_lock(&nvmet_disc_subsys->lock);
list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
continue;
__nvmet_disc_changed(port, ctrl);
}
+ mutex_unlock(&nvmet_disc_subsys->lock);
}
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
@@ -46,12 +49,14 @@ static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
{
struct nvmet_ctrl *ctrl;
+ mutex_lock(&nvmet_disc_subsys->lock);
list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
continue;
__nvmet_disc_changed(port, ctrl);
}
+ mutex_unlock(&nvmet_disc_subsys->lock);
}
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
@@ -131,54 +136,76 @@ static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port
memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
+static size_t discovery_log_entries(struct nvmet_req *req)
+{
+ struct nvmet_ctrl *ctrl = req->sq->ctrl;
+ struct nvmet_subsys_link *p;
+ struct nvmet_port *r;
+ size_t entries = 0;
+
+ list_for_each_entry(p, &req->port->subsystems, entry) {
+ if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
+ continue;
+ entries++;
+ }
+ list_for_each_entry(r, &req->port->referrals, entry)
+ entries++;
+ return entries;
+}
+
static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
{
const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
struct nvmet_ctrl *ctrl = req->sq->ctrl;
struct nvmf_disc_rsp_page_hdr *hdr;
+ u64 offset = nvmet_get_log_page_offset(req->cmd);
size_t data_len = nvmet_get_log_page_len(req->cmd);
- size_t alloc_len = max(data_len, sizeof(*hdr));
- int residual_len = data_len - sizeof(*hdr);
+ size_t alloc_len;
struct nvmet_subsys_link *p;
struct nvmet_port *r;
u32 numrec = 0;
u16 status = 0;
+ void *buffer;
+
+ /* Spec requires dword aligned offsets */
+ if (offset & 0x3) {
+ status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
+ goto out;
+ }
/*
* Make sure we're passing at least a buffer of response header size.
* If host provided data len is less than the header size, only the
* number of bytes requested by host will be sent to host.
*/
- hdr = kzalloc(alloc_len, GFP_KERNEL);
- if (!hdr) {
+ down_read(&nvmet_config_sem);
+ alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
+ buffer = kzalloc(alloc_len, GFP_KERNEL);
+ if (!buffer) {
+ up_read(&nvmet_config_sem);
status = NVME_SC_INTERNAL;
goto out;
}
- down_read(&nvmet_config_sem);
+ hdr = buffer;
list_for_each_entry(p, &req->port->subsystems, entry) {
+ char traddr[NVMF_TRADDR_SIZE];
+
if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
continue;
- if (residual_len >= entry_size) {
- char traddr[NVMF_TRADDR_SIZE];
-
- nvmet_set_disc_traddr(req, req->port, traddr);
- nvmet_format_discovery_entry(hdr, req->port,
- p->subsys->subsysnqn, traddr,
- NVME_NQN_NVME, numrec);
- residual_len -= entry_size;
- }
+
+ nvmet_set_disc_traddr(req, req->port, traddr);
+ nvmet_format_discovery_entry(hdr, req->port,
+ p->subsys->subsysnqn, traddr,
+ NVME_NQN_NVME, numrec);
numrec++;
}
list_for_each_entry(r, &req->port->referrals, entry) {
- if (residual_len >= entry_size) {
- nvmet_format_discovery_entry(hdr, r,
- NVME_DISC_SUBSYS_NAME,
- r->disc_addr.traddr,
- NVME_NQN_DISC, numrec);
- residual_len -= entry_size;
- }
+ nvmet_format_discovery_entry(hdr, r,
+ NVME_DISC_SUBSYS_NAME,
+ r->disc_addr.traddr,
+ NVME_NQN_DISC, numrec);
numrec++;
}
@@ -190,8 +217,8 @@ static void nvmet_execute_get_disc_log_page(struct nvmet_req *req)
up_read(&nvmet_config_sem);
- status = nvmet_copy_to_sgl(req, 0, hdr, data_len);
- kfree(hdr);
+ status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
+ kfree(buffer);
out:
nvmet_req_complete(req, status);
}
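The rewritten handler sizes the discovery log from the actual number of visible entries and then serves the host's (offset, length) window out of that buffer. A small worked sketch of the sizing arithmetic, with hypothetical counts:

    /* Sketch with hypothetical counts: 3 allowed subsystems + 1 referral
     * visible on this port, so the log is one header plus 4 entries. */
    size_t entries   = 3 + 1;
    size_t alloc_len = sizeof(struct nvmf_disc_rsp_page_hdr) +
                       entries * sizeof(struct nvmf_disc_rsp_page_entry);
    void *buffer     = kzalloc(alloc_len, GFP_KERNEL);

    /* ... format the header and all entries into buffer ... */
    /* then serve exactly the window the host asked for: */
    status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);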
@@ -350,8 +377,8 @@ int __init nvmet_init_discovery(void)
{
nvmet_disc_subsys =
nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
- if (!nvmet_disc_subsys)
- return -ENOMEM;
+ if (IS_ERR(nvmet_disc_subsys))
+ return PTR_ERR(nvmet_disc_subsys);
return 0;
}
diff --git a/drivers/nvme/target/fabrics-cmd.c b/drivers/nvme/target/fabrics-cmd.c
index 3a76ebc3d155..3b9f79aba98f 100644
--- a/drivers/nvme/target/fabrics-cmd.c
+++ b/drivers/nvme/target/fabrics-cmd.c
@@ -72,7 +72,7 @@ static void nvmet_execute_prop_get(struct nvmet_req *req)
offsetof(struct nvmf_property_get_command, attrib);
}
- req->rsp->result.u64 = cpu_to_le64(val);
+ req->cqe->result.u64 = cpu_to_le64(val);
nvmet_req_complete(req, status);
}
@@ -124,7 +124,7 @@ static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
req->sq->sqhd_disabled = true;
- req->rsp->sq_head = cpu_to_le16(0xffff);
+ req->cqe->sq_head = cpu_to_le16(0xffff);
}
if (ctrl->ops->install_queue) {
@@ -158,7 +158,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
goto out;
/* zero out initial completion result, assign values as needed */
- req->rsp->result.u32 = 0;
+ req->cqe->result.u32 = 0;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
@@ -172,7 +172,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
pr_warn("connect attempt for invalid controller ID %#x\n",
d->cntlid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- req->rsp->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
goto out;
}
@@ -195,7 +195,7 @@ static void nvmet_execute_admin_connect(struct nvmet_req *req)
pr_info("creating controller %d for subsystem %s for NQN %s.\n",
ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
- req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
out:
kfree(d);
@@ -222,7 +222,7 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
goto out;
/* zero out initial completion result, assign values as needed */
- req->rsp->result.u32 = 0;
+ req->cqe->result.u32 = 0;
if (c->recfmt != 0) {
pr_warn("invalid connect version (%d).\n",
@@ -240,14 +240,14 @@ static void nvmet_execute_io_connect(struct nvmet_req *req)
if (unlikely(qid > ctrl->subsys->max_qid)) {
pr_warn("invalid queue id (%d)\n", qid);
status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
- req->rsp->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
+ req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
goto out_ctrl_put;
}
status = nvmet_install_queue(ctrl, req);
if (status) {
/* pass back cntlid that had the issue of installing queue */
- req->rsp->result.u16 = cpu_to_le16(ctrl->cntlid);
+ req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
goto out_ctrl_put;
}
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 98b7b1f4ee96..508661af0f50 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -128,12 +128,12 @@ struct nvmet_fc_tgt_queue {
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
struct nvmet_fc_tgt_assoc *assoc;
- struct nvmet_fc_fcp_iod *fod; /* array of fcp_iods */
struct list_head fod_list;
struct list_head pending_cmd_list;
struct list_head avail_defer_list;
struct workqueue_struct *work_q;
struct kref ref;
+ struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
} __aligned(sizeof(unsigned long long));
struct nvmet_fc_tgt_assoc {
@@ -588,9 +588,7 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (qid > NVMET_NR_QUEUES)
return NULL;
- queue = kzalloc((sizeof(*queue) +
- (sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
- GFP_KERNEL);
+ queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
if (!queue)
return NULL;
@@ -603,7 +601,6 @@ nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
if (!queue->work_q)
goto out_a_put;
- queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
queue->qid = qid;
queue->sqsize = sqsize;
queue->assoc = assoc;
@@ -2187,7 +2184,7 @@ nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
}
fod->req.cmd = &fod->cmdiubuf.sqe;
- fod->req.rsp = &fod->rspiubuf.cqe;
+ fod->req.cqe = &fod->rspiubuf.cqe;
fod->req.port = tgtport->pe->port;
/* clear any response payload */
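The fc.c queue allocation replaces an open-coded trailing allocation with a C99 flexible array member sized via struct_size(). A minimal sketch of the pattern, with hypothetical types:

    #include <linux/overflow.h>
    #include <linux/slab.h>

    struct item { u64 payload; };

    struct container {
            unsigned int count;
            struct item items[];    /* flexible array member, must be last */
    };

    /* struct_size(c, items, n) computes sizeof(*c) + n * sizeof(c->items[0])
     * with overflow checking, replacing the open-coded arithmetic and the
     * separate pointer that previously aliased &queue[1]. */
    static struct container *container_alloc(unsigned int n)
    {
            struct container *c = kzalloc(struct_size(c, items, n), GFP_KERNEL);

            if (!c)
                    return NULL;
            c->count = n;
            return c;
    }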
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index a065dbfc43b1..3efc52f9c309 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -196,7 +196,7 @@ static u16 nvmet_bdev_discard_range(struct nvmet_req *req,
GFP_KERNEL, 0, bio);
if (ret && ret != -EOPNOTSUPP) {
req->error_slba = le64_to_cpu(range->slba);
- return blk_to_nvme_status(req, errno_to_blk_status(ret));
+ return errno_to_nvme_status(req, ret);
}
return NVME_SC_SUCCESS;
}
@@ -252,7 +252,6 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
{
struct nvme_write_zeroes_cmd *write_zeroes = &req->cmd->write_zeroes;
struct bio *bio = NULL;
- u16 status = NVME_SC_SUCCESS;
sector_t sector;
sector_t nr_sector;
int ret;
@@ -264,13 +263,12 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
ret = __blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
GFP_KERNEL, &bio, 0);
- status = blk_to_nvme_status(req, errno_to_blk_status(ret));
if (bio) {
bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done;
submit_bio(bio);
} else {
- nvmet_req_complete(req, status);
+ nvmet_req_complete(req, errno_to_nvme_status(req, ret));
}
}
diff --git a/drivers/nvme/target/io-cmd-file.c b/drivers/nvme/target/io-cmd-file.c
index bc6ebb51b0bf..05453f5d1448 100644
--- a/drivers/nvme/target/io-cmd-file.c
+++ b/drivers/nvme/target/io-cmd-file.c
@@ -49,7 +49,12 @@ int nvmet_file_ns_enable(struct nvmet_ns *ns)
goto err;
ns->size = stat.size;
- ns->blksize_shift = file_inode(ns->file)->i_blkbits;
+ /*
+ * i_blkbits can be greater than the universally accepted upper bound,
+ * so make sure we export a sane namespace lba_shift.
+ */
+ ns->blksize_shift = min_t(u8,
+ file_inode(ns->file)->i_blkbits, 12);
ns->bvec_cache = kmem_cache_create("nvmet-bvec",
NVMET_MAX_MPOOL_BVEC * sizeof(struct bio_vec),
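A quick worked example of the clamp, assuming a hypothetical backing file on a filesystem with 64 KiB blocks:

    /* i_blkbits == 16 would exceed what hosts accept as an LBA size,
     * so the min_t() caps the exported shift at 12. */
    u8 blksize_shift = min_t(u8, 16 /* i_blkbits */, 12);  /* -> 12, i.e. 4096-byte LBAs */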
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index b9f623ab01f3..9e211ad6bdd3 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -18,7 +18,7 @@
struct nvme_loop_iod {
struct nvme_request nvme_req;
struct nvme_command cmd;
- struct nvme_completion rsp;
+ struct nvme_completion cqe;
struct nvmet_req req;
struct nvme_loop_queue *queue;
struct work_struct work;
@@ -94,7 +94,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
{
struct nvme_loop_queue *queue =
container_of(req->sq, struct nvme_loop_queue, nvme_sq);
- struct nvme_completion *cqe = req->rsp;
+ struct nvme_completion *cqe = req->cqe;
/*
* AEN requests are special as they don't time out and can
@@ -129,20 +129,6 @@ static void nvme_loop_execute_work(struct work_struct *work)
nvmet_req_execute(&iod->req);
}
-static enum blk_eh_timer_return
-nvme_loop_timeout(struct request *rq, bool reserved)
-{
- struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(rq);
-
- /* queue error recovery */
- nvme_reset_ctrl(&iod->queue->ctrl->ctrl);
-
- /* fail with DNR on admin cmd timeout */
- nvme_req(rq)->status = NVME_SC_ABORT_REQ | NVME_SC_DNR;
-
- return BLK_EH_DONE;
-}
-
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
@@ -207,7 +193,7 @@ static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
struct nvme_loop_iod *iod, unsigned int queue_idx)
{
iod->req.cmd = &iod->cmd;
- iod->req.rsp = &iod->rsp;
+ iod->req.cqe = &iod->cqe;
iod->queue = &ctrl->queues[queue_idx];
INIT_WORK(&iod->work, nvme_loop_execute_work);
return 0;
@@ -253,7 +239,6 @@ static const struct blk_mq_ops nvme_loop_mq_ops = {
.complete = nvme_loop_complete_rq,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_hctx,
- .timeout = nvme_loop_timeout,
};
static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
@@ -261,7 +246,6 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
.complete = nvme_loop_complete_rq,
.init_request = nvme_loop_init_request,
.init_hctx = nvme_loop_init_admin_hctx,
- .timeout = nvme_loop_timeout,
};
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 51e49efd7849..c25d88fc9dec 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -284,7 +284,7 @@ struct nvmet_fabrics_ops {
struct nvmet_req {
struct nvme_command *cmd;
- struct nvme_completion *rsp;
+ struct nvme_completion *cqe;
struct nvmet_sq *sq;
struct nvmet_cq *cq;
struct nvmet_ns *ns;
@@ -322,7 +322,7 @@ extern struct workqueue_struct *buffered_io_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
- req->rsp->result.u32 = cpu_to_le32(result);
+ req->cqe->result.u32 = cpu_to_le32(result);
}
/*
@@ -428,6 +428,7 @@ u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf,
u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len);
u32 nvmet_get_log_page_len(struct nvme_command *cmd);
+u64 nvmet_get_log_page_offset(struct nvme_command *cmd);
extern struct list_head *nvmet_ports;
void nvmet_port_disc_changed(struct nvmet_port *port,
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ef893addf341..36d906a7f70d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -160,7 +160,7 @@ static inline bool nvmet_rdma_need_data_out(struct nvmet_rdma_rsp *rsp)
{
return !nvme_is_write(rsp->req.cmd) &&
rsp->req.transfer_len &&
- !rsp->req.rsp->status &&
+ !rsp->req.cqe->status &&
!(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA);
}
@@ -364,16 +364,17 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_rsp *r)
{
/* NVMe CQE / RDMA SEND */
- r->req.rsp = kmalloc(sizeof(*r->req.rsp), GFP_KERNEL);
- if (!r->req.rsp)
+ r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL);
+ if (!r->req.cqe)
goto out;
- r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.rsp,
- sizeof(*r->req.rsp), DMA_TO_DEVICE);
+ r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe,
+ sizeof(*r->req.cqe), DMA_TO_DEVICE);
if (ib_dma_mapping_error(ndev->device, r->send_sge.addr))
goto out_free_rsp;
- r->send_sge.length = sizeof(*r->req.rsp);
+ r->req.p2p_client = &ndev->device->dev;
+ r->send_sge.length = sizeof(*r->req.cqe);
r->send_sge.lkey = ndev->pd->local_dma_lkey;
r->send_cqe.done = nvmet_rdma_send_done;
@@ -388,7 +389,7 @@ static int nvmet_rdma_alloc_rsp(struct nvmet_rdma_device *ndev,
return 0;
out_free_rsp:
- kfree(r->req.rsp);
+ kfree(r->req.cqe);
out:
return -ENOMEM;
}
@@ -397,8 +398,8 @@ static void nvmet_rdma_free_rsp(struct nvmet_rdma_device *ndev,
struct nvmet_rdma_rsp *r)
{
ib_dma_unmap_single(ndev->device, r->send_sge.addr,
- sizeof(*r->req.rsp), DMA_TO_DEVICE);
- kfree(r->req.rsp);
+ sizeof(*r->req.cqe), DMA_TO_DEVICE);
+ kfree(r->req.cqe);
}
static int
@@ -763,8 +764,6 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->send_sge.addr, cmd->send_sge.length,
DMA_TO_DEVICE);
- cmd->req.p2p_client = &queue->dev->device->dev;
-
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index ad0df786fe93..69b83fa0c76c 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -161,14 +161,14 @@ static inline bool nvmet_tcp_has_data_in(struct nvmet_tcp_cmd *cmd)
static inline bool nvmet_tcp_need_data_in(struct nvmet_tcp_cmd *cmd)
{
- return nvmet_tcp_has_data_in(cmd) && !cmd->req.rsp->status;
+ return nvmet_tcp_has_data_in(cmd) && !cmd->req.cqe->status;
}
static inline bool nvmet_tcp_need_data_out(struct nvmet_tcp_cmd *cmd)
{
return !nvme_is_write(cmd->req.cmd) &&
cmd->req.transfer_len > 0 &&
- !cmd->req.rsp->status;
+ !cmd->req.cqe->status;
}
static inline bool nvmet_tcp_has_inline_data(struct nvmet_tcp_cmd *cmd)
@@ -371,13 +371,14 @@ static void nvmet_setup_c2h_data_pdu(struct nvmet_tcp_cmd *cmd)
cmd->state = NVMET_TCP_SEND_DATA_PDU;
pdu->hdr.type = nvme_tcp_c2h_data;
- pdu->hdr.flags = NVME_TCP_F_DATA_LAST;
+ pdu->hdr.flags = NVME_TCP_F_DATA_LAST | (queue->nvme_sq.sqhd_disabled ?
+ NVME_TCP_F_DATA_SUCCESS : 0);
pdu->hdr.hlen = sizeof(*pdu);
pdu->hdr.pdo = pdu->hdr.hlen + hdgst;
pdu->hdr.plen =
cpu_to_le32(pdu->hdr.hlen + hdgst +
cmd->req.transfer_len + ddgst);
- pdu->command_id = cmd->req.rsp->command_id;
+ pdu->command_id = cmd->req.cqe->command_id;
pdu->data_length = cpu_to_le32(cmd->req.transfer_len);
pdu->data_offset = cpu_to_le32(cmd->wbytes_done);
@@ -542,8 +543,19 @@ static int nvmet_try_send_data(struct nvmet_tcp_cmd *cmd)
cmd->state = NVMET_TCP_SEND_DDGST;
cmd->offset = 0;
} else {
- nvmet_setup_response_pdu(cmd);
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
+ }
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ kfree(cmd->iov);
+ sgl_free(cmd->req.sg);
}
+
return 1;
}
@@ -619,7 +631,13 @@ static int nvmet_try_send_ddgst(struct nvmet_tcp_cmd *cmd)
return ret;
cmd->offset += ret;
- nvmet_setup_response_pdu(cmd);
+
+ if (queue->nvme_sq.sqhd_disabled) {
+ cmd->queue->snd_cmd = NULL;
+ nvmet_tcp_put_cmd(cmd);
+ } else {
+ nvmet_setup_response_pdu(cmd);
+ }
return 1;
}
@@ -756,12 +774,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
return -EPROTO;
}
- if (icreq->maxr2t != 0) {
- pr_err("queue %d: unsupported maxr2t %d\n", queue->idx,
- le32_to_cpu(icreq->maxr2t) + 1);
- return -EPROTO;
- }
-
queue->hdr_digest = !!(icreq->digest & NVME_TCP_HDR_DIGEST_ENABLE);
queue->data_digest = !!(icreq->digest & NVME_TCP_DATA_DIGEST_ENABLE);
if (queue->hdr_digest || queue->data_digest) {
@@ -1206,7 +1218,7 @@ static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
sizeof(*c->rsp_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
if (!c->rsp_pdu)
goto out_free_cmd;
- c->req.rsp = &c->rsp_pdu->cqe;
+ c->req.cqe = &c->rsp_pdu->cqe;
c->data_pdu = page_frag_alloc(&queue->pf_cache,
sizeof(*c->data_pdu) + hdgst, GFP_KERNEL | __GFP_ZERO);
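The TCP changes wire up sqhd_disabled: when the host disabled SQ flow control at connect time, success is signalled in the final C2H data PDU via NVME_TCP_F_DATA_SUCCESS and no response capsule follows, so the command can be released as soon as the data (or its data digest) is on the wire. A condensed sketch of that decision, using the names shown in the hunks above:

    /* Sketch, condensed from the hunks above: after the last data or
     * DDGST send, either queue a response PDU or drop the command when
     * the host requested SQ-flow-control-disabled operation. */
    if (cmd->queue->nvme_sq.sqhd_disabled) {
            cmd->queue->snd_cmd = NULL;   /* no response capsule will follow */
            nvmet_tcp_put_cmd(cmd);
    } else {
            nvmet_setup_response_pdu(cmd);
    }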