author      Matias Bjørling <m@bjorling.me>    2016-09-16 14:25:07 +0200
committer   Jens Axboe <axboe@fb.com>          2016-09-21 15:56:18 +0200
commit      b0b4e09c1ae71c4ec33df0616b830ae050006e9b (patch)
tree        67a148740845d77af662b36ec17d88f991c84d72 /drivers/nvme
parent      blk-mq: register device instead of disk (diff)
lightnvm: control life of nvm_dev in driver
LightNVM-compatible device drivers do not have a method to expose
LightNVM-specific sysfs entries.
To expose these sysfs entries, a LightNVM device driver needs a struct
device to attach them to. To allow the actual device driver and the
LightNVM sysfs entries to coexist, the device driver now tracks the
lifetime of the nvm_dev structure.
This patch refactors NVMe and null_blk to handle the lifetime of struct
nvm_dev, which eliminates the need for a struct gendisk when a
LightNVM-compatible device is provided.
Signed-off-by: Matias Bjørling <m@bjorling.me>
Signed-off-by: Jens Axboe <axboe@fb.com>
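
The ownership model described above is the core of the change: instead of the
LightNVM core tying an nvm_dev to a gendisk, the driver allocates the nvm_dev,
hands it to nvm_register(), and frees it again on teardown. Below is a minimal
sketch of that pattern for a hypothetical driver; all my_* names are
placeholders, while the nvm_alloc_dev()/nvm_register()/nvm_unregister() calls
mirror the nvme_nvm_register()/nvme_nvm_unregister() changes in the diff below.

    /*
     * Hedged sketch of the driver-owned nvm_dev lifetime introduced by this
     * patch. All my_* identifiers are hypothetical; only the LightNVM calls
     * (nvm_alloc_dev, nvm_register, nvm_unregister) come from the diff below.
     */
    #include <linux/blkdev.h>
    #include <linux/lightnvm.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static struct nvm_dev_ops my_nvm_dev_ops;       /* filled in by the driver */

    static int my_drv_nvm_register(struct request_queue *q, char *disk_name,
                                   int node, struct nvm_dev **ndev)
    {
            struct nvm_dev *dev;

            /* The driver allocates and owns the nvm_dev ... */
            dev = nvm_alloc_dev(node);
            if (!dev)
                    return -ENOMEM;

            dev->q = q;
            memcpy(dev->name, disk_name, DISK_NAME_LEN);
            dev->ops = &my_nvm_dev_ops;
            *ndev = dev;

            /* ... and hands it to the LightNVM core for registration. */
            return nvm_register(dev);
    }

    static void my_drv_nvm_unregister(struct nvm_dev *ndev)
    {
            /* Unregister from the core, then free the structure the driver owns. */
            nvm_unregister(ndev);
            kfree(ndev);
    }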
Diffstat (limited to 'drivers/nvme')
-rw-r--r--   drivers/nvme/host/core.c     | 36
-rw-r--r--   drivers/nvme/host/lightnvm.c | 31
-rw-r--r--   drivers/nvme/host/nvme.h     | 12
3 files changed, 46 insertions, 33 deletions
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2c3da3315a02..3c707d83b1da 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -156,12 +156,14 @@ static void nvme_free_ns(struct kref *kref)
 {
 	struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref);
 
-	if (ns->type == NVME_NS_LIGHTNVM)
-		nvme_nvm_unregister(ns->queue, ns->disk->disk_name);
+	if (ns->ndev)
+		nvme_nvm_unregister(ns);
 
-	spin_lock(&dev_list_lock);
-	ns->disk->private_data = NULL;
-	spin_unlock(&dev_list_lock);
+	if (ns->disk) {
+		spin_lock(&dev_list_lock);
+		ns->disk->private_data = NULL;
+		spin_unlock(&dev_list_lock);
+	}
 
 	put_disk(ns->disk);
 	ida_simple_remove(&ns->ctrl->ns_ida, ns->instance);
@@ -891,8 +893,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
 static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id)
 {
 	if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) {
-		dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n",
-				__func__);
+		dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__);
 		return -ENODEV;
 	}
 
@@ -1683,18 +1684,11 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 		goto out_free_queue;
 
 	if (nvme_nvm_ns_supported(ns, id)) {
-		if (nvme_nvm_register(ns->queue, disk_name)) {
-			dev_warn(ctrl->dev,
-				"%s: LightNVM init failure\n", __func__);
+		if (nvme_nvm_register(ns, disk_name, node)) {
+			dev_warn(ctrl->dev, "%s: LightNVM init failure\n",
+								__func__);
 			goto out_free_id;
 		}
-
-		disk = alloc_disk_node(0, node);
-		if (!disk)
-			goto out_free_id;
-		memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
-		ns->disk = disk;
-		ns->type = NVME_NS_LIGHTNVM;
 	} else {
 		disk = alloc_disk_node(0, node);
 		if (!disk)
@@ -1718,7 +1712,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	kfree(id);
 
-	if (ns->type == NVME_NS_LIGHTNVM)
+	if (ns->ndev)
 		return;
 
 	device_add_disk(ctrl->device, ns->disk);
@@ -1742,7 +1736,7 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
 		return;
 
-	if (ns->disk->flags & GENHD_FL_UP) {
+	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
 		if (blk_get_integrity(ns->disk))
 			blk_integrity_unregister(ns->disk);
 		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
@@ -1765,7 +1759,7 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	ns = nvme_find_get_ns(ctrl, nsid);
 	if (ns) {
-		if (revalidate_disk(ns->disk))
+		if (ns->disk && revalidate_disk(ns->disk))
 			nvme_ns_remove(ns);
 		nvme_put_ns(ns);
 	} else
@@ -2070,7 +2064,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
 		 * Revalidating a dead namespace sets capacity to 0. This will
 		 * end buffered writers dirtying pages that can't be synced.
 		 */
-		if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+		if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags))
 			revalidate_disk(ns->disk);
 
 		blk_set_queue_dying(ns->queue);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 7268a7a1a19a..798fcd9f5d1f 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -474,9 +474,8 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd,
 	c->ph_rw.length = cpu_to_le16(rqd->nr_ppas - 1);
 
 	if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD)
-		/* momentarily hardcode the shift configuration. lba_shift from
-		 * nvm_dev will be available in a follow-up patch */
-		c->hb_rw.slba = cpu_to_le64(rqd->bio->bi_iter.bi_sector >> 3);
+		c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns,
+					rqd->bio->bi_iter.bi_sector));
 }
 
 static void nvme_nvm_end_io(struct request *rq, int error)
@@ -593,14 +592,32 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = {
 	.max_phys_sect		= 64,
 };
 
-int nvme_nvm_register(struct request_queue *q, char *disk_name)
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node)
 {
-	return nvm_register(q, disk_name, &nvme_nvm_dev_ops);
+	struct request_queue *q = ns->queue;
+	struct nvm_dev *dev;
+	int ret;
+
+	dev = nvm_alloc_dev(node);
+	if (!dev)
+		return -ENOMEM;
+
+	dev->q = q;
+	memcpy(dev->name, disk_name, DISK_NAME_LEN);
+	dev->ops = &nvme_nvm_dev_ops;
+	ns->ndev = dev;
+
+	ret = nvm_register(dev);
+
+	ns->lba_shift = ilog2(dev->sec_size) - 9;
+
+	return ret;
 }
 
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name)
+void nvme_nvm_unregister(struct nvme_ns *ns)
 {
-	nvm_unregister(disk_name);
+	nvm_unregister(ns->ndev);
+	kfree(ns->ndev);
 }
 
 /* move to shared place when used in multiple places. */
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index ab18b78102bf..e0535c14e538 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -18,6 +18,7 @@
 #include <linux/pci.h>
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
+#include <linux/lightnvm.h>
 
 enum {
 	/*
@@ -154,6 +155,7 @@ struct nvme_ns {
 	struct nvme_ctrl *ctrl;
 	struct request_queue *queue;
 	struct gendisk *disk;
+	struct nvm_dev *ndev;
 	struct kref kref;
 	int instance;
 
@@ -165,7 +167,6 @@ struct nvme_ns {
 	u16 ms;
 	bool ext;
 	u8 pi_type;
-	int type;
 	unsigned long flags;
 
 #define NVME_NS_REMOVING 0
@@ -307,15 +308,16 @@ int nvme_sg_get_version_num(int __user *ip);
 
 #ifdef CONFIG_NVM
 int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id);
-int nvme_nvm_register(struct request_queue *q, char *disk_name);
-void nvme_nvm_unregister(struct request_queue *q, char *disk_name);
+int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
+void nvme_nvm_unregister(struct nvme_ns *ns);
 #else
-static inline int nvme_nvm_register(struct request_queue *q, char *disk_name)
+static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
+				    int node)
 {
 	return 0;
 }
-static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {};
+static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
 
 static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id)
 {
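
A side note on the core.c hunks above: they all encode the same invariant. After
this patch a LightNVM namespace sets ns->ndev and leaves ns->disk NULL, so
shared paths must guard every gendisk access. The snippet below is a
hypothetical illustration of that guard, not part of the patch; it mirrors the
checks added to nvme_validate_ns() and nvme_kill_queues().

    /*
     * Hypothetical illustration (not in the patch) of the ns->disk guard that
     * the core.c hunks add: LightNVM namespaces (ns->ndev != NULL) carry no
     * gendisk, so gendisk-only work is skipped for them.
     */
    static void example_revalidate(struct nvme_ns *ns)
    {
            if (ns->ndev)
                    return;                 /* LightNVM namespace: no gendisk */

            if (ns->disk && revalidate_disk(ns->disk))
                    nvme_ns_remove(ns);     /* drop namespaces that fail revalidation */
    }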