Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--  drivers/nvme/host/core.c      |  22
-rw-r--r--  drivers/nvme/host/ioctl.c     |  12
-rw-r--r--  drivers/nvme/host/multipath.c |  21
-rw-r--r--  drivers/nvme/host/nvme.h      |  10
-rw-r--r--  drivers/nvme/host/pci.c       | 147
-rw-r--r--  drivers/nvme/host/pr.c        | 122
-rw-r--r--  drivers/nvme/host/rdma.c      |   4
7 files changed, 256 insertions, 82 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 1a8d32a4a5c3..40e7be3b0339 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1305,9 +1305,10 @@ static void nvme_queue_keep_alive_work(struct nvme_ctrl *ctrl)
 	queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
 }
 
-static void nvme_keep_alive_finish(struct request *rq,
-		blk_status_t status, struct nvme_ctrl *ctrl)
+static enum rq_end_io_ret nvme_keep_alive_end_io(struct request *rq,
+		blk_status_t status)
 {
+	struct nvme_ctrl *ctrl = rq->end_io_data;
 	unsigned long rtt = jiffies - (rq->deadline - rq->timeout);
 	unsigned long delay = nvme_keep_alive_work_period(ctrl);
 	enum nvme_ctrl_state state = nvme_ctrl_state(ctrl);
@@ -1324,17 +1325,20 @@ static void nvme_keep_alive_finish(struct request *rq,
 		delay = 0;
 	}
 
+	blk_mq_free_request(rq);
+
 	if (status) {
 		dev_err(ctrl->device,
 			"failed nvme_keep_alive_end_io error=%d\n",
 				status);
-		return;
+		return RQ_END_IO_NONE;
 	}
 
 	ctrl->ka_last_check_time = jiffies;
 	ctrl->comp_seen = false;
 	if (state == NVME_CTRL_LIVE || state == NVME_CTRL_CONNECTING)
 		queue_delayed_work(nvme_wq, &ctrl->ka_work, delay);
+	return RQ_END_IO_NONE;
 }
 
 static void nvme_keep_alive_work(struct work_struct *work)
@@ -1343,7 +1347,6 @@ static void nvme_keep_alive_work(struct work_struct *work)
 			struct nvme_ctrl, ka_work);
 	bool comp_seen = ctrl->comp_seen;
 	struct request *rq;
-	blk_status_t status;
 
 	ctrl->ka_last_check_time = jiffies;
 
@@ -1366,9 +1369,9 @@ static void nvme_keep_alive_work(struct work_struct *work)
 	nvme_init_request(rq, &ctrl->ka_cmd);
 
 	rq->timeout = ctrl->kato * HZ;
-	status = blk_execute_rq(rq, false);
-	nvme_keep_alive_finish(rq, status, ctrl);
-	blk_mq_free_request(rq);
+	rq->end_io = nvme_keep_alive_end_io;
+	rq->end_io_data = ctrl;
+	blk_execute_rq_nowait(rq, false);
 }
 
 static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
@@ -4600,6 +4603,11 @@ EXPORT_SYMBOL_GPL(nvme_alloc_admin_tag_set);
 
 void nvme_remove_admin_tag_set(struct nvme_ctrl *ctrl)
 {
+	/*
+	 * As we're about to destroy the queue and free tagset
+	 * we can not have keep-alive work running.
+	 */
+	nvme_stop_keep_alive(ctrl);
	blk_mq_destroy_queue(ctrl->admin_q);
 	blk_put_queue(ctrl->admin_q);
 	if (ctrl->ops->flags & NVME_F_FABRICS) {
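Note: the core.c hunks above move keep-alive completion out of the worker
thread; instead of blocking in blk_execute_rq(), the request is dispatched
with blk_execute_rq_nowait() and a completion callback. A minimal sketch of
that pattern (illustrative only, not part of the patch; my_end_io and
my_ctx are hypothetical names):

	static enum rq_end_io_ret my_end_io(struct request *rq,
					    blk_status_t status)
	{
		struct my_ctx *ctx = rq->end_io_data; /* stashed at submit */

		/* the callback now owns the request and must free it */
		blk_mq_free_request(rq);
		/* ... act on status, e.g. reschedule work ... */
		return RQ_END_IO_NONE; /* blk-mq must not touch rq again */
	}

	/* submit side: */
	rq->end_io = my_end_io;
	rq->end_io_data = ctx;
	blk_execute_rq_nowait(rq, false); /* returns without waiting */

This is also why nvme_remove_admin_tag_set() now calls
nvme_stop_keep_alive(): with an asynchronous keep-alive possibly in flight,
the admin queue must not be destroyed underneath it.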
diff --git a/drivers/nvme/host/ioctl.c b/drivers/nvme/host/ioctl.c
index 6522ae16531c..e8930146847a 100644
--- a/drivers/nvme/host/ioctl.c
+++ b/drivers/nvme/host/ioctl.c
@@ -120,12 +120,20 @@ static int nvme_map_user_request(struct request *req, u64 ubuffer,
 	struct nvme_ns *ns = q->queuedata;
 	struct block_device *bdev = ns ? ns->disk->part0 : NULL;
 	bool supports_metadata = bdev && blk_get_integrity(bdev->bd_disk);
+	struct nvme_ctrl *ctrl = nvme_req(req)->ctrl;
 	bool has_metadata = meta_buffer && meta_len;
 	struct bio *bio = NULL;
 	int ret;
 
-	if (has_metadata && !supports_metadata)
-		return -EINVAL;
+	if (!nvme_ctrl_sgl_supported(ctrl))
+		dev_warn_once(ctrl->device, "using unchecked data buffer\n");
+	if (has_metadata) {
+		if (!supports_metadata)
+			return -EINVAL;
+		if (!nvme_ctrl_meta_sgl_supported(ctrl))
+			dev_warn_once(ctrl->device,
+				      "using unchecked metadata buffer\n");
+	}
 
 	if (ioucmd && (ioucmd->flags & IORING_URING_CMD_FIXED)) {
 		struct iov_iter iter;
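Note: the added warnings are diagnostics, not failures. The likely
rationale (my reading, not stated in the diff): with SGLs the transfer is
bounded by the descriptors the host builds, whereas a PRP-only controller
takes the transfer length from the passthrough command itself, so a
userspace command with a mismatched length field can make the device DMA
past the supplied buffer. The one-shot form keeps the log quiet:

	if (!nvme_ctrl_sgl_supported(ctrl))
		dev_warn_once(ctrl->device, "using unchecked data buffer\n");

dev_warn_once() fires a single time per call site; later submissions down
the same path stay silent.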
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index f04cfe3fb936..a85d190942bd 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -165,7 +165,8 @@ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&ctrl->srcu);
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+				 srcu_read_lock_held(&ctrl->srcu)) {
 		if (!ns->head->disk)
 			continue;
 		kblockd_schedule_work(&ns->head->requeue_work);
@@ -209,7 +210,8 @@ void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&ctrl->srcu);
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+				 srcu_read_lock_held(&ctrl->srcu)) {
 		nvme_mpath_clear_current_path(ns);
 		kblockd_schedule_work(&ns->head->requeue_work);
 	}
@@ -224,7 +226,8 @@ void nvme_mpath_revalidate_paths(struct nvme_ns *ns)
 	int srcu_idx;
 
 	srcu_idx = srcu_read_lock(&head->srcu);
-	list_for_each_entry_rcu(ns, &head->list, siblings) {
+	list_for_each_entry_srcu(ns, &head->list, siblings,
+				 srcu_read_lock_held(&head->srcu)) {
 		if (capacity != get_capacity(ns->disk))
 			clear_bit(NVME_NS_READY, &ns->flags);
 	}
@@ -257,7 +260,8 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
 	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
 	struct nvme_ns *found = NULL, *fallback = NULL, *ns;
 
-	list_for_each_entry_rcu(ns, &head->list, siblings) {
+	list_for_each_entry_srcu(ns, &head->list, siblings,
+				 srcu_read_lock_held(&head->srcu)) {
 		if (nvme_path_is_disabled(ns))
 			continue;
 
@@ -356,7 +360,8 @@ static struct nvme_ns *nvme_queue_depth_path(struct nvme_ns_head *head)
 	unsigned int min_depth_opt = UINT_MAX, min_depth_nonopt = UINT_MAX;
 	unsigned int depth;
 
-	list_for_each_entry_rcu(ns, &head->list, siblings) {
+	list_for_each_entry_srcu(ns, &head->list, siblings,
+				 srcu_read_lock_held(&head->srcu)) {
 		if (nvme_path_is_disabled(ns))
 			continue;
 
@@ -424,7 +429,8 @@ static bool nvme_available_path(struct nvme_ns_head *head)
 	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
 		return NULL;
 
-	list_for_each_entry_rcu(ns, &head->list, siblings) {
+	list_for_each_entry_srcu(ns, &head->list, siblings,
+				 srcu_read_lock_held(&head->srcu)) {
 		if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags))
 			continue;
 		switch (nvme_ctrl_state(ns->ctrl)) {
@@ -783,7 +789,8 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
 		return 0;
 
 	srcu_idx = srcu_read_lock(&ctrl->srcu);
-	list_for_each_entry_rcu(ns, &ctrl->namespaces, list) {
+	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
+				 srcu_read_lock_held(&ctrl->srcu)) {
 		unsigned nsid;
 again:
 		nsid = le32_to_cpu(desc->nsids[n]);
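Note: every multipath.c hunk above is the same mechanical conversion.
list_for_each_entry_srcu() behaves like list_for_each_entry_rcu() but
takes an extra cond argument that lockdep can check, documenting which
SRCU domain protects the walk. Hedged usage sketch (hypothetical
surroundings, inspect() is a made-up helper):

	int idx;

	idx = srcu_read_lock(&ctrl->srcu);
	list_for_each_entry_srcu(ns, &ctrl->namespaces, list,
				 srcu_read_lock_held(&ctrl->srcu))
		inspect(ns);
	srcu_read_unlock(&ctrl->srcu, idx);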
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 900719c4c70c..611b02c8a8b3 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -1123,7 +1123,15 @@ static inline void nvme_start_request(struct request *rq)
 
 static inline bool nvme_ctrl_sgl_supported(struct nvme_ctrl *ctrl)
 {
-	return ctrl->sgls & ((1 << 0) | (1 << 1));
+	return ctrl->sgls & (NVME_CTRL_SGLS_BYTE_ALIGNED |
+			     NVME_CTRL_SGLS_DWORD_ALIGNED);
+}
+
+static inline bool nvme_ctrl_meta_sgl_supported(struct nvme_ctrl *ctrl)
+{
+	if (ctrl->ops->flags & NVME_F_FABRICS)
+		return true;
+	return ctrl->sgls & NVME_CTRL_SGLS_MSDS;
 }
 
 #ifdef CONFIG_NVME_HOST_AUTH
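Note: the named SGLS flags replace raw bit masks against the Identify
Controller SGLS field. Judging by the literals being replaced here and in
rdma.c below, plus the NVMe Base spec's "Metadata SGL Descriptor Support"
bit, the values are presumably (verify against include/linux/nvme.h):

	enum {
		NVME_CTRL_SGLS_BYTE_ALIGNED	= 1 << 0,
		NVME_CTRL_SGLS_DWORD_ALIGNED	= 1 << 1,
		NVME_CTRL_SGLS_KSDBDS		= 1 << 2,
		NVME_CTRL_SGLS_MSDS		= 1 << 19,
		NVME_CTRL_SGLS_SAOS		= 1 << 20,
	};

Fabrics transports return true unconditionally, presumably because they
carry metadata through the transport rather than via a separate MPTR.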
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5f2e3ad2cc52..4c644bb7f069 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -43,6 +43,7 @@
  */
 #define NVME_MAX_KB_SZ	8192
 #define NVME_MAX_SEGS	128
+#define NVME_MAX_META_SEGS 15
 #define NVME_MAX_NR_ALLOCATIONS	5
 
 static int use_threaded_interrupts;
@@ -144,6 +145,7 @@ struct nvme_dev {
 	struct sg_table *hmb_sgt;
 
 	mempool_t *iod_mempool;
+	mempool_t *iod_meta_mempool;
 
 	/* shadow doorbell buffer support: */
 	__le32 *dbbuf_dbs;
@@ -239,6 +241,8 @@ struct nvme_iod {
 	dma_addr_t first_dma;
 	dma_addr_t meta_dma;
 	struct sg_table sgt;
+	struct sg_table meta_sgt;
+	union nvme_descriptor meta_list;
 	union nvme_descriptor list[NVME_MAX_NR_ALLOCATIONS];
 };
 
@@ -506,6 +510,15 @@ static void nvme_commit_rqs(struct blk_mq_hw_ctx *hctx)
 	spin_unlock(&nvmeq->sq_lock);
 }
 
+static inline bool nvme_pci_metadata_use_sgls(struct nvme_dev *dev,
+					      struct request *req)
+{
+	if (!nvme_ctrl_meta_sgl_supported(&dev->ctrl))
+		return false;
+	return req->nr_integrity_segments > 1 ||
+		nvme_req(req)->flags & NVME_REQ_USERCMD;
+}
+
 static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
 				     int nseg)
 {
@@ -518,8 +531,10 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req,
 		return false;
 	if (!nvmeq->qid)
 		return false;
+	if (nvme_pci_metadata_use_sgls(dev, req))
+		return true;
 	if (!sgl_threshold || avg_seg_size < sgl_threshold)
-		return false;
+		return nvme_req(req)->flags & NVME_REQ_USERCMD;
 	return true;
 }
 
@@ -780,7 +795,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
 		struct bio_vec bv = req_bvec(req);
 
 		if (!is_pci_p2pdma_page(bv.bv_page)) {
-			if ((bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
+			if (!nvme_pci_metadata_use_sgls(dev, req) &&
+			    (bv.bv_offset & (NVME_CTRL_PAGE_SIZE - 1)) +
 			    bv.bv_len <= NVME_CTRL_PAGE_SIZE * 2)
 				return nvme_setup_prp_simple(dev, req,
 							     &cmnd->rw, &bv);
@@ -824,11 +840,69 @@ out_free_sg:
 	return ret;
 }
 
-static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
-		struct nvme_command *cmnd)
+static blk_status_t nvme_pci_setup_meta_sgls(struct nvme_dev *dev,
+					     struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+	struct nvme_rw_command *cmnd = &iod->cmd.rw;
+	struct nvme_sgl_desc *sg_list;
+	struct scatterlist *sgl, *sg;
+	unsigned int entries;
+	dma_addr_t sgl_dma;
+	int rc, i;
+
+	iod->meta_sgt.sgl = mempool_alloc(dev->iod_meta_mempool, GFP_ATOMIC);
+	if (!iod->meta_sgt.sgl)
+		return BLK_STS_RESOURCE;
+
+	sg_init_table(iod->meta_sgt.sgl, req->nr_integrity_segments);
+	iod->meta_sgt.orig_nents = blk_rq_map_integrity_sg(req,
+							   iod->meta_sgt.sgl);
+	if (!iod->meta_sgt.orig_nents)
+		goto out_free_sg;
+
+	rc = dma_map_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req),
+			     DMA_ATTR_NO_WARN);
+	if (rc)
+		goto out_free_sg;
+
+	sg_list = dma_pool_alloc(dev->prp_small_pool, GFP_ATOMIC, &sgl_dma);
+	if (!sg_list)
+		goto out_unmap_sg;
+
+	entries = iod->meta_sgt.nents;
+	iod->meta_list.sg_list = sg_list;
+	iod->meta_dma = sgl_dma;
+
+	cmnd->flags = NVME_CMD_SGL_METASEG;
+	cmnd->metadata = cpu_to_le64(sgl_dma);
+
+	sgl = iod->meta_sgt.sgl;
+	if (entries == 1) {
+		nvme_pci_sgl_set_data(sg_list, sgl);
+		return BLK_STS_OK;
+	}
+
+	sgl_dma += sizeof(*sg_list);
+	nvme_pci_sgl_set_seg(sg_list, sgl_dma, entries);
+	for_each_sg(sgl, sg, entries, i)
+		nvme_pci_sgl_set_data(&sg_list[i + 1], sg);
+
+	return BLK_STS_OK;
+
+out_unmap_sg:
+	dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
+out_free_sg:
+	mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
+	return BLK_STS_RESOURCE;
+}
+
+static blk_status_t nvme_pci_setup_meta_mptr(struct nvme_dev *dev,
+					     struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
 	struct bio_vec bv = rq_integrity_vec(req);
+	struct nvme_command *cmnd = &iod->cmd;
 
 	iod->meta_dma = dma_map_bvec(dev->dev, &bv, rq_dma_dir(req), 0);
 	if (dma_mapping_error(dev->dev, iod->meta_dma))
@@ -837,6 +911,13 @@ static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req,
 	return BLK_STS_OK;
 }
 
+static blk_status_t nvme_map_metadata(struct nvme_dev *dev, struct request *req)
+{
+	if (nvme_pci_metadata_use_sgls(dev, req))
+		return nvme_pci_setup_meta_sgls(dev, req);
+	return nvme_pci_setup_meta_mptr(dev, req);
+}
+
 static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 {
 	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
@@ -845,6 +926,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 	iod->aborted = false;
 	iod->nr_allocations = -1;
 	iod->sgt.nents = 0;
+	iod->meta_sgt.nents = 0;
 
 	ret = nvme_setup_cmd(req->q->queuedata, req);
 	if (ret)
@@ -857,7 +939,7 @@ static blk_status_t nvme_prep_rq(struct nvme_dev *dev, struct request *req)
 	}
 
 	if (blk_integrity_rq(req)) {
-		ret = nvme_map_metadata(dev, req, &iod->cmd);
+		ret = nvme_map_metadata(dev, req);
 		if (ret)
 			goto out_unmap_data;
 	}
@@ -955,17 +1037,31 @@ static void nvme_queue_rqs(struct rq_list *rqlist)
 	*rqlist = requeue_list;
 }
 
+static __always_inline void nvme_unmap_metadata(struct nvme_dev *dev,
+						struct request *req)
+{
+	struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
+
+	if (!iod->meta_sgt.nents) {
+		dma_unmap_page(dev->dev, iod->meta_dma,
+			       rq_integrity_vec(req).bv_len,
+			       rq_dma_dir(req));
+		return;
+	}
+
+	dma_pool_free(dev->prp_small_pool, iod->meta_list.sg_list,
+		      iod->meta_dma);
+	dma_unmap_sgtable(dev->dev, &iod->meta_sgt, rq_dma_dir(req), 0);
+	mempool_free(iod->meta_sgt.sgl, dev->iod_meta_mempool);
+}
+
 static __always_inline void nvme_pci_unmap_rq(struct request *req)
 {
 	struct nvme_queue *nvmeq = req->mq_hctx->driver_data;
 	struct nvme_dev *dev = nvmeq->dev;
 
-	if (blk_integrity_rq(req)) {
-		struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
-
-		dma_unmap_page(dev->dev, iod->meta_dma,
-			       rq_integrity_vec(req).bv_len, rq_dma_dir(req));
-	}
+	if (blk_integrity_rq(req))
+		nvme_unmap_metadata(dev, req);
 
 	if (blk_rq_nr_phys_segments(req))
 		nvme_unmap_data(dev, req);
@@ -2761,6 +2857,7 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
 
 static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
 {
+	size_t meta_size = sizeof(struct scatterlist) * (NVME_MAX_META_SEGS + 1);
 	size_t alloc_size = sizeof(struct scatterlist) * NVME_MAX_SEGS;
 
 	dev->iod_mempool = mempool_create_node(1,
@@ -2769,7 +2866,18 @@ static int nvme_pci_alloc_iod_mempool(struct nvme_dev *dev)
 			dev_to_node(dev->dev));
 	if (!dev->iod_mempool)
 		return -ENOMEM;
+
+	dev->iod_meta_mempool = mempool_create_node(1,
+			mempool_kmalloc, mempool_kfree,
+			(void *)meta_size, GFP_KERNEL,
+			dev_to_node(dev->dev));
+	if (!dev->iod_meta_mempool)
+		goto free;
+
 	return 0;
+free:
+	mempool_destroy(dev->iod_mempool);
+	return -ENOMEM;
 }
 
 static void nvme_free_tagset(struct nvme_dev *dev)
@@ -2834,6 +2942,11 @@ static void nvme_reset_work(struct work_struct *work)
 	if (result)
 		goto out;
 
+	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
+		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
+	else
+		dev->ctrl.max_integrity_segments = 1;
+
 	nvme_dbbuf_dma_alloc(dev);
 
 	result = nvme_setup_host_mem(dev);
@@ -3101,11 +3214,6 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev,
 	dev->ctrl.max_hw_sectors = min_t(u32,
 		NVME_MAX_KB_SZ << 1, dma_opt_mapping_size(&pdev->dev) >> 9);
 	dev->ctrl.max_segments = NVME_MAX_SEGS;
-
-	/*
-	 * There is no support for SGLs for metadata (yet), so we are limited to
-	 * a single integrity segment for the separate metadata pointer.
-	 */
 	dev->ctrl.max_integrity_segments = 1;
 
 	return dev;
@@ -3168,6 +3276,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (result)
 		goto out_disable;
 
+	if (nvme_ctrl_meta_sgl_supported(&dev->ctrl))
+		dev->ctrl.max_integrity_segments = NVME_MAX_META_SEGS;
+	else
+		dev->ctrl.max_integrity_segments = 1;
+
 	nvme_dbbuf_dma_alloc(dev);
 
 	result = nvme_setup_host_mem(dev);
@@ -3210,6 +3323,7 @@ out_disable:
 	nvme_free_queues(dev, 0);
 out_release_iod_mempool:
 	mempool_destroy(dev->iod_mempool);
+	mempool_destroy(dev->iod_meta_mempool);
 out_release_prp_pools:
 	nvme_release_prp_pools(dev);
 out_dev_unmap:
@@ -3275,6 +3389,7 @@ static void nvme_remove(struct pci_dev *pdev)
 	nvme_dbbuf_dma_free(dev);
 	nvme_free_queues(dev, 0);
 	mempool_destroy(dev->iod_mempool);
+	mempool_destroy(dev->iod_meta_mempool);
 	nvme_release_prp_pools(dev);
 	nvme_dev_unmap(dev);
 	nvme_uninit_ctrl(&dev->ctrl);
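Note: nvme_pci_setup_meta_sgls() packs the entire metadata SGL into one
allocation from prp_small_pool. Assuming the 256-byte pool size used
elsewhere in pci.c and 16-byte nvme_sgl_desc entries, that leaves room for
one leading segment descriptor plus NVME_MAX_META_SEGS = 15 data
descriptors, which is where the new limit comes from. Resulting layout for
N > 1 mapped integrity segments (sketch):

	/* one dma_pool block; sgl_dma = DMA address of sg_list[0] */
	sg_list[0]	SGL segment descriptor -> points at sg_list[1..N]
	sg_list[1..N]	SGL data block descriptors, one per segment

	cmnd->metadata = cpu_to_le64(sgl_dma);	/* MPTR */
	cmnd->flags    = NVME_CMD_SGL_METASEG;	/* MPTR holds an SGL */

For a single segment the segment descriptor is skipped and sg_list[0] is
the lone data descriptor, as the entries == 1 branch shows. On teardown,
nvme_unmap_metadata() distinguishes the two cases by meta_sgt.nents, which
nvme_prep_rq() zeroes up front.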
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index dc7922f22600..cf2d2c5039dd 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -94,109 +94,137 @@ static int nvme_status_to_pr_err(int status)
 	}
 }
 
-static int nvme_send_pr_command(struct block_device *bdev,
-		struct nvme_command *c, void *data, unsigned int data_len)
+static int __nvme_send_pr_command(struct block_device *bdev, u32 cdw10,
+		u32 cdw11, u8 op, void *data, unsigned int data_len)
 {
-	if (nvme_disk_is_ns_head(bdev->bd_disk))
-		return nvme_send_ns_head_pr_command(bdev, c, data, data_len);
+	struct nvme_command c = { 0 };
 
-	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, c, data,
-				       data_len);
+	c.common.opcode = op;
+	c.common.cdw10 = cpu_to_le32(cdw10);
+	c.common.cdw11 = cpu_to_le32(cdw11);
+
+	if (nvme_disk_is_ns_head(bdev->bd_disk))
+		return nvme_send_ns_head_pr_command(bdev, &c, data, data_len);
+	return nvme_send_ns_pr_command(bdev->bd_disk->private_data, &c,
+				       data, data_len);
 }
 
-static int nvme_pr_command(struct block_device *bdev, u32 cdw10,
-				u64 key, u64 sa_key, u8 op)
+static int nvme_send_pr_command(struct block_device *bdev, u32 cdw10, u32 cdw11,
+		u8 op, void *data, unsigned int data_len)
 {
-	struct nvme_command c = { };
-	u8 data[16] = { 0, };
 	int ret;
 
-	put_unaligned_le64(key, &data[0]);
-	put_unaligned_le64(sa_key, &data[8]);
-
-	c.common.opcode = op;
-	c.common.cdw10 = cpu_to_le32(cdw10);
-
-	ret = nvme_send_pr_command(bdev, &c, data, sizeof(data));
-	if (ret < 0)
-		return ret;
-
-	return nvme_status_to_pr_err(ret);
+	ret = __nvme_send_pr_command(bdev, cdw10, cdw11, op, data, data_len);
+	return ret < 0 ? ret : nvme_status_to_pr_err(ret);
 }
 
-static int nvme_pr_register(struct block_device *bdev, u64 old,
-		u64 new, unsigned flags)
+static int nvme_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
+		unsigned int flags)
 {
+	struct nvmet_pr_register_data data = { 0 };
 	u32 cdw10;
 
 	if (flags & ~PR_FL_IGNORE_KEY)
 		return -EOPNOTSUPP;
 
-	cdw10 = old ? 2 : 0;
-	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0;
-	cdw10 |= (1 << 30) | (1 << 31); /* PTPL=1 */
-	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_register);
+	data.crkey = cpu_to_le64(old_key);
+	data.nrkey = cpu_to_le64(new_key);
+
+	cdw10 = old_key ? NVME_PR_REGISTER_ACT_REPLACE :
+		NVME_PR_REGISTER_ACT_REG;
+	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;
+	cdw10 |= NVME_PR_CPTPL_PERSIST;
+
+	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_register,
+				    &data, sizeof(data));
 }
 
 static int nvme_pr_reserve(struct block_device *bdev, u64 key,
 		enum pr_type type, unsigned flags)
 {
+	struct nvmet_pr_acquire_data data = { 0 };
 	u32 cdw10;
 
 	if (flags & ~PR_FL_IGNORE_KEY)
 		return -EOPNOTSUPP;
 
-	cdw10 = nvme_pr_type_from_blk(type) << 8;
-	cdw10 |= ((flags & PR_FL_IGNORE_KEY) ? 1 << 3 : 0);
-	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_acquire);
+	data.crkey = cpu_to_le64(key);
+
+	cdw10 = NVME_PR_ACQUIRE_ACT_ACQUIRE;
+	cdw10 |= nvme_pr_type_from_blk(type) << 8;
+	cdw10 |= (flags & PR_FL_IGNORE_KEY) ? NVME_PR_IGNORE_KEY : 0;
+
+	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
+				    &data, sizeof(data));
 }
 
 static int nvme_pr_preempt(struct block_device *bdev, u64 old, u64 new,
 		enum pr_type type, bool abort)
 {
-	u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (abort ? 2 : 1);
+	struct nvmet_pr_acquire_data data = { 0 };
+	u32 cdw10;
+
+	data.crkey = cpu_to_le64(old);
+	data.prkey = cpu_to_le64(new);
 
-	return nvme_pr_command(bdev, cdw10, old, new, nvme_cmd_resv_acquire);
+	cdw10 = abort ? NVME_PR_ACQUIRE_ACT_PREEMPT_AND_ABORT :
+		NVME_PR_ACQUIRE_ACT_PREEMPT;
+	cdw10 |= nvme_pr_type_from_blk(type) << 8;
+
+	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_acquire,
+				    &data, sizeof(data));
 }
 
 static int nvme_pr_clear(struct block_device *bdev, u64 key)
 {
-	u32 cdw10 = 1 | (key ? 0 : 1 << 3);
+	struct nvmet_pr_release_data data = { 0 };
+	u32 cdw10;
+
+	data.crkey = cpu_to_le64(key);
 
-	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+	cdw10 = NVME_PR_RELEASE_ACT_CLEAR;
+	cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;
+
+	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
+				    &data, sizeof(data));
 }
 
 static int nvme_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
 {
-	u32 cdw10 = nvme_pr_type_from_blk(type) << 8 | (key ? 0 : 1 << 3);
+	struct nvmet_pr_release_data data = { 0 };
+	u32 cdw10;
+
+	data.crkey = cpu_to_le64(key);
 
-	return nvme_pr_command(bdev, cdw10, key, 0, nvme_cmd_resv_release);
+	cdw10 = NVME_PR_RELEASE_ACT_RELEASE;
+	cdw10 |= nvme_pr_type_from_blk(type) << 8;
+	cdw10 |= key ? 0 : NVME_PR_IGNORE_KEY;
+
+	return nvme_send_pr_command(bdev, cdw10, 0, nvme_cmd_resv_release,
+				    &data, sizeof(data));
 }
 
 static int nvme_pr_resv_report(struct block_device *bdev, void *data,
 		u32 data_len, bool *eds)
 {
-	struct nvme_command c = { };
+	u32 cdw10, cdw11;
 	int ret;
 
-	c.common.opcode = nvme_cmd_resv_report;
-	c.common.cdw10 = cpu_to_le32(nvme_bytes_to_numd(data_len));
-	c.common.cdw11 = cpu_to_le32(NVME_EXTENDED_DATA_STRUCT);
+	cdw10 = nvme_bytes_to_numd(data_len);
+	cdw11 = NVME_EXTENDED_DATA_STRUCT;
 	*eds = true;
 
 retry:
-	ret = nvme_send_pr_command(bdev, &c, data, data_len);
+	ret = __nvme_send_pr_command(bdev, cdw10, cdw11, nvme_cmd_resv_report,
+				     data, data_len);
 	if (ret == NVME_SC_HOST_ID_INCONSIST &&
-	    c.common.cdw11 == cpu_to_le32(NVME_EXTENDED_DATA_STRUCT)) {
-		c.common.cdw11 = 0;
+	    cdw11 == NVME_EXTENDED_DATA_STRUCT) {
+		cdw11 = 0;
 		*eds = false;
 		goto retry;
 	}
 
-	if (ret < 0)
-		return ret;
-
-	return nvme_status_to_pr_err(ret);
+	return ret < 0 ? ret : nvme_status_to_pr_err(ret);
 }
 
 static int nvme_pr_read_keys(struct block_device *bdev,
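Note: the pr.c rework trades magic numbers for named cdw10 action/flag
constants and for the little-endian payload structs shared with the NVMe
target code. The 16-byte register payload the old put_unaligned_le64()
pair built by hand corresponds to (sketch; check the nvmet_pr_*_data
definitions for the authoritative layout):

	struct nvmet_pr_register_data {
		__le64	crkey;	/* current reservation key */
		__le64	nrkey;	/* new reservation key */
	};

In cdw10, per the NVMe spec, the action lives in bits 2:0, IEKEY (ignore
existing key) in bit 3, the reservation type in bits 15:8, and CPTPL
(persist-through-power-loss control) in bits 31:30, which is what the old
"(1 << 30) | (1 << 31); /* PTPL=1 */" encoded and NVME_PR_CPTPL_PERSIST
now names in nvme_pr_register().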
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 24a2759798d0..baf7d2490152 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1019,7 +1019,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 		goto destroy_admin;
 	}
 
-	if (!(ctrl->ctrl.sgls & (1 << 2))) {
+	if (!(ctrl->ctrl.sgls & NVME_CTRL_SGLS_KSDBDS)) {
 		ret = -EOPNOTSUPP;
 		dev_err(ctrl->ctrl.device,
 			"Mandatory keyed sgls are not supported!\n");
@@ -1051,7 +1051,7 @@ static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new)
 		ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
 	}
 
-	if (ctrl->ctrl.sgls & (1 << 20))
+	if (ctrl->ctrl.sgls & NVME_CTRL_SGLS_SAOS)
 		ctrl->use_inline_data = true;
 
 	if (ctrl->ctrl.queue_count > 1) {
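Note: the rdma.c hunks are the same cleanup as in nvme.h. Reading the bits
being renamed: NVME_CTRL_SGLS_KSDBDS is the controller's "Keyed SGL Data
Block descriptor" support bit, which RDMA treats as mandatory (hence the
-EOPNOTSUPP), and NVME_CTRL_SGLS_SAOS is the "SGL Address field specifies
an offset" bit that gates in-capsule inline data. The values appear
unchanged from the old (1 << 2) and (1 << 20) literals; behavior is
identical.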