author	Mike Christie <michael.christie@oracle.com>	2023-04-07 22:05:46 +0200
committer	Martin K. Petersen <martin.petersen@oracle.com>	2023-04-12 03:55:36 +0200
commit	28c97ba38ff9c00bc177887c2d8568b7115a44e0 (patch)
tree	7d5c397dd01b7b2861632fc80bdbba06e673fec2 /drivers/nvme/host
parent	nvme: Add a nvme_pr_type enum (diff)
nvme: Add pr_ops read_reservation support
This patch adds support for the pr_ops read_reservation callout by calling
the NVMe Reservation Report helper. It then parses that info to detect
whether a reservation is held and, if so, converts the returned info into a
pr_ops pr_held_reservation struct.

Signed-off-by: Mike Christie <michael.christie@oracle.com>
Link: https://lore.kernel.org/r/20230407200551.12660-14-michael.christie@oracle.com
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
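[Editor's note] For readers unfamiliar with the pr_ops plumbing, the sketch
below shows how an in-kernel consumer could exercise the new callout through
a block device's pr_ops table. It is an illustrative assumption, not part of
this patch: the example_show_reservation() helper is hypothetical, while
struct pr_held_reservation and the pr_read_reservation() callout come from
this series.

#include <linux/blkdev.h>
#include <linux/pr.h>
#include <linux/printk.h>

/*
 * Hypothetical caller (not from this patch): read the reservation held on
 * @bdev via the generic pr_ops table that nvme_pr_ops is plugged into.
 */
static int example_show_reservation(struct block_device *bdev)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_held_reservation resv = { };
	int ret;

	if (!ops || !ops->pr_read_reservation)
		return -EOPNOTSUPP;

	ret = ops->pr_read_reservation(bdev, &resv);
	if (ret)
		return ret;

	/* generation is always filled in; key/type only if a reservation is held */
	pr_info("PR generation %u, key 0x%llx, type %d\n",
		resv.generation, resv.key, resv.type);
	return 0;
}

The callout is meant for in-kernel users; this patch does not expose it
through a new ioctl (the diffstat below touches only drivers/nvme/host/pr.c).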
Diffstat (limited to 'drivers/nvme/host')
-rw-r--r--	drivers/nvme/host/pr.c	83
1 file changed, 83 insertions, 0 deletions
diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
index 732c56b417c2..391b1465ebfd 100644
--- a/drivers/nvme/host/pr.c
+++ b/drivers/nvme/host/pr.c
@@ -29,6 +29,26 @@ static enum nvme_pr_type nvme_pr_type_from_blk(enum pr_type type)
 	return 0;
 }
 
+static enum pr_type block_pr_type_from_nvme(enum nvme_pr_type type)
+{
+	switch (type) {
+	case NVME_PR_WRITE_EXCLUSIVE:
+		return PR_WRITE_EXCLUSIVE;
+	case NVME_PR_EXCLUSIVE_ACCESS:
+		return PR_EXCLUSIVE_ACCESS;
+	case NVME_PR_WRITE_EXCLUSIVE_REG_ONLY:
+		return PR_WRITE_EXCLUSIVE_REG_ONLY;
+	case NVME_PR_EXCLUSIVE_ACCESS_REG_ONLY:
+		return PR_EXCLUSIVE_ACCESS_REG_ONLY;
+	case NVME_PR_WRITE_EXCLUSIVE_ALL_REGS:
+		return PR_WRITE_EXCLUSIVE_ALL_REGS;
+	case NVME_PR_EXCLUSIVE_ACCESS_ALL_REGS:
+		return PR_EXCLUSIVE_ACCESS_ALL_REGS;
+	}
+
+	return 0;
+}
+
 static int nvme_send_ns_head_pr_command(struct block_device *bdev,
 		struct nvme_command *c, void *data, unsigned int data_len)
 {
@@ -222,6 +242,68 @@ free_rse:
 	return ret;
 }
 
+static int nvme_pr_read_reservation(struct block_device *bdev,
+		struct pr_held_reservation *resv)
+{
+	struct nvme_reservation_status_ext tmp_rse, *rse;
+	int ret, i, num_regs;
+	u32 rse_len;
+	bool eds;
+
+get_num_regs:
+	/*
+	 * Get the number of registrations so we know how big to allocate
+	 * the response buffer.
+	 */
+	ret = nvme_pr_resv_report(bdev, &tmp_rse, sizeof(tmp_rse), &eds);
+	if (ret)
+		return ret;
+
+	num_regs = get_unaligned_le16(&tmp_rse.regctl);
+	if (!num_regs) {
+		resv->generation = le32_to_cpu(tmp_rse.gen);
+		return 0;
+	}
+
+	rse_len = struct_size(rse, regctl_eds, num_regs);
+	rse = kzalloc(rse_len, GFP_KERNEL);
+	if (!rse)
+		return -ENOMEM;
+
+	ret = nvme_pr_resv_report(bdev, rse, rse_len, &eds);
+	if (ret)
+		goto free_rse;
+
+	if (num_regs != get_unaligned_le16(&rse->regctl)) {
+		kfree(rse);
+		goto get_num_regs;
+	}
+
+	resv->generation = le32_to_cpu(rse->gen);
+	resv->type = block_pr_type_from_nvme(rse->rtype);
+
+	for (i = 0; i < num_regs; i++) {
+		if (eds) {
+			if (rse->regctl_eds[i].rcsts) {
+				resv->key = le64_to_cpu(rse->regctl_eds[i].rkey);
+				break;
+			}
+		} else {
+			struct nvme_reservation_status *rs;
+
+			rs = (struct nvme_reservation_status *)rse;
+			if (rs->regctl_ds[i].rcsts) {
+				resv->key = le64_to_cpu(rs->regctl_ds[i].rkey);
+				break;
+			}
+		}
+	}
+
+free_rse:
+	kfree(rse);
+	return ret;
+}
+
 const struct pr_ops nvme_pr_ops = {
 	.pr_register = nvme_pr_register,
 	.pr_reserve = nvme_pr_reserve,
@@ -229,4 +311,5 @@ const struct pr_ops nvme_pr_ops = {
 	.pr_preempt = nvme_pr_preempt,
 	.pr_clear = nvme_pr_clear,
 	.pr_read_keys = nvme_pr_read_keys,
+	.pr_read_reservation = nvme_pr_read_reservation,
 };