| author | Keith Busch <keith.busch@intel.com> | 2013-07-15 23:02:19 +0200 |
| --- | --- | --- |
| committer | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2013-09-03 22:39:25 +0200 |
| commit | 0877cb0d285c7f1d53d0b84b360bdea4be4f3f59 (patch) | |
| tree | 3bbad0b26e8cb58d928e4e2d1661da2f22ba29a2 /drivers/block | |
| parent | NVMe: Disk stats for read/write commands only (diff) | |
| download | linux-0877cb0d285c7f1d53d0b84b360bdea4be4f3f59.tar.xz, linux-0877cb0d285c7f1d53d0b84b360bdea4be4f3f59.zip | |
NVMe: Group pci related actions in functions
This will make it easier to reuse these outside probe/remove.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
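The point of the grouping is that the same PCI bring-up and tear-down steps are needed in paths other than probe/remove. Purely as an illustrative sketch (not part of this patch), a later reset or power-management path could reuse the two new helpers roughly as below; the `*_example` functions are hypothetical names and assume the drivers/block/nvme-core.c context as it exists after this patch:

```c
/*
 * Hypothetical reuse sketch -- not in this commit.  Assumes the static
 * helpers nvme_dev_map()/nvme_dev_unmap() introduced by this patch and
 * the existing nvme_configure_admin_queue() in nvme-core.c.
 */
static void nvme_dev_shutdown_example(struct nvme_dev *dev)
{
	/* queue teardown elided */
	nvme_dev_unmap(dev);	/* drop BAR mapping, MSI/MSI-X, regions, device */
}

static int nvme_dev_resume_example(struct nvme_dev *dev)
{
	int result;

	result = nvme_dev_map(dev);	/* re-enable PCI and remap BAR 0 */
	if (result)
		return result;
	return nvme_configure_admin_queue(dev);	/* bring the admin queue back up */
}
```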
Diffstat (limited to 'drivers/block')
-rw-r--r-- | drivers/block/nvme-core.c | 112 |
1 file changed, 66 insertions(+), 46 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 360ac5d32d26..a93f52c48036 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1191,9 +1191,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u64 cap = readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
 
-	dev->dbs = ((void __iomem *)dev->bar) + 4096;
-	dev->db_stride = NVME_CAP_STRIDE(cap);
-
 	result = nvme_disable_ctrl(dev, cap);
 	if (result < 0)
 		return result;
@@ -1832,6 +1829,61 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	return res;
 }
 
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+	int bars, result = -ENOMEM;
+	struct pci_dev *pdev = dev->pci_dev;
+
+	if (pci_enable_device_mem(pdev))
+		return result;
+
+	dev->entry[0].vector = pdev->irq;
+	pci_set_master(pdev);
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	if (pci_request_selected_regions(pdev, bars, "nvme"))
+		goto disable_pci;
+
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	else
+		goto disable_pci;
+
+	pci_set_drvdata(pdev, dev);
+	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+	if (!dev->bar)
+		goto disable;
+
+	dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+	return 0;
+
+ disable:
+	pci_release_regions(pdev);
+ disable_pci:
+	pci_disable_device(pdev);
+	return result;
+}
+
+static void nvme_dev_unmap(struct nvme_dev *dev)
+{
+	if (dev->pci_dev->msi_enabled)
+		pci_disable_msi(dev->pci_dev);
+	else if (dev->pci_dev->msix_enabled)
+		pci_disable_msix(dev->pci_dev);
+
+	if (dev->bar) {
+		iounmap(dev->bar);
+		dev->bar = NULL;
+	}
+
+	pci_release_regions(dev->pci_dev);
+	if (pci_is_enabled(dev->pci_dev))
+		pci_disable_device(dev->pci_dev);
+}
+
 static int nvme_dev_remove(struct nvme_dev *dev)
 {
 	struct nvme_ns *ns, *next;
@@ -1908,15 +1960,9 @@ static void nvme_free_dev(struct kref *kref)
 {
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 	nvme_dev_remove(dev);
-	if (dev->pci_dev->msi_enabled)
-		pci_disable_msi(dev->pci_dev);
-	else if (dev->pci_dev->msix_enabled)
-		pci_disable_msix(dev->pci_dev);
-	iounmap(dev->bar);
+	nvme_dev_unmap(dev);
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
-	pci_disable_device(dev->pci_dev);
-	pci_release_regions(dev->pci_dev);
 	kfree(dev->queues);
 	kfree(dev->entry);
 	kfree(dev);
@@ -1959,7 +2005,7 @@ static const struct file_operations nvme_dev_fops = {
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-	int bars, result = -ENOMEM;
+	int result = -ENOMEM;
 	struct nvme_dev *dev;
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1974,39 +2020,19 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!dev->queues)
 		goto free;
 
-	if (pci_enable_device_mem(pdev))
-		goto free;
-	pci_set_master(pdev);
-	bars = pci_select_bars(pdev, IORESOURCE_MEM);
-	if (pci_request_selected_regions(pdev, bars, "nvme"))
-		goto disable;
-
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->pci_dev = pdev;
-	pci_set_drvdata(pdev, dev);
-
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-	else
-		goto disable;
-
 	result = nvme_set_instance(dev);
 	if (result)
-		goto disable;
-
-	dev->entry[0].vector = pdev->irq;
+		goto free;
 
 	result = nvme_setup_prp_pools(dev);
 	if (result)
-		goto disable_msix;
+		goto release;
 
-	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
-	if (!dev->bar) {
-		result = -ENOMEM;
-		goto disable_msix;
-	}
+	result = nvme_dev_map(dev);
+	if (result)
+		goto release_pools;
 
 	result = nvme_configure_admin_queue(dev);
 	if (result)
@@ -2042,17 +2068,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	nvme_free_queues(dev);
  unmap:
-	iounmap(dev->bar);
- disable_msix:
-	if (dev->pci_dev->msi_enabled)
-		pci_disable_msi(dev->pci_dev);
-	else if (dev->pci_dev->msix_enabled)
-		pci_disable_msix(dev->pci_dev);
-	nvme_release_instance(dev);
+	nvme_dev_unmap(dev);
+ release_pools:
 	nvme_release_prp_pools(dev);
- disable:
-	pci_disable_device(pdev);
-	pci_release_regions(pdev);
+ release:
+	nvme_release_instance(dev);
  free:
 	kfree(dev->queues);
 	kfree(dev->entry);