author | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-17 21:52:23 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-12-17 21:52:23 +0100 |
commit | 6daa90439e91bb9a71864b02f7d0af8587ea889a (patch) | |
tree | cd747a40f9aac87089d3d73de9d42c5863fe7896 /drivers/dma/idxd/init.c | |
parent | Merge tag 'mailbox-v5.11' of git://git.linaro.org/landing-teams/working/fujit... (diff) | |
parent | soc: ti: k3-ringacc: Use correct error casting in k3_ringacc_dmarings_init (diff) | |
Merge tag 'dmaengine-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine
Pull dmaengine updates from Vinod Koul:
"The last dmaengine updates for this year :)
This contains couple of new drivers, new device support and updates to
bunch of drivers.
New drivers/devices:
- Qualcomm ADM driver
- Qualcomm GPI driver
- Allwinner A100 DMA support
- Microchip Sama7g5 support
- Mediatek MT8516 apdma
Updates:
- more updates to the idxd driver and support for IAX config
- runtime PM support for dw driver
- TI drivers"
* tag 'dmaengine-5.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (75 commits)
soc: ti: k3-ringacc: Use correct error casting in k3_ringacc_dmarings_init
dmaengine: ti: k3-udma-glue: Add support for K3 PKTDMA
dmaengine: ti: k3-udma: Initial support for K3 PKTDMA
dmaengine: ti: k3-udma: Add support for BCDMA channel TPL handling
dmaengine: ti: k3-udma: Initial support for K3 BCDMA
soc: ti: k3-ringacc: add AM64 DMA rings support.
dmaengine: ti: Add support for k3 event routers
dmaengine: ti: k3-psil: Add initial map for AM64
dmaengine: ti: k3-psil: Extend psil_endpoint_config for K3 PKTDMA
dt-bindings: dma: ti: Add document for K3 PKTDMA
dt-bindings: dma: ti: Add document for K3 BCDMA
dmaengine: dmatest: Use dmaengine_get_dma_device
dmaengine: doc: client: Update for dmaengine_get_dma_device() usage
dmaengine: Add support for per channel coherency handling
dmaengine: of-dma: Add support for optional router configuration callback
dmaengine: ti: k3-udma-glue: Configure the dma_dev for rings
dmaengine: ti: k3-udma-glue: Get the ringacc from udma_dev
dmaengine: ti: k3-udma-glue: Add function to get device pointer for DMA API
dmaengine: ti: k3-udma: Add support for second resource range from sysfw
dmaengine: ti: k3-udma: Wait for peer teardown completion if supported
...
Diffstat (limited to 'drivers/dma/idxd/init.c')
-rw-r--r-- | drivers/dma/idxd/init.c | 123 |
1 file changed, 93 insertions(+), 30 deletions(-)
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 0a4432b063b5..2c051e07c34c 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -14,6 +14,8 @@
 #include <linux/io-64-nonatomic-lo-hi.h>
 #include <linux/device.h>
 #include <linux/idr.h>
+#include <linux/intel-svm.h>
+#include <linux/iommu.h>
 #include <uapi/linux/idxd.h>
 #include <linux/dmaengine.h>
 #include "../dmaengine.h"
@@ -26,18 +28,24 @@ MODULE_AUTHOR("Intel Corporation");
 
 #define DRV_NAME "idxd"
 
+bool support_enqcmd;
+
 static struct idr idxd_idrs[IDXD_TYPE_MAX];
 static struct mutex idxd_idr_lock;
 
 static struct pci_device_id idxd_pci_tbl[] = {
 	/* DSA ver 1.0 platforms */
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) },
+
+	/* IAX ver 1.0 platforms */
+	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IAX_SPR0) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, idxd_pci_tbl);
 
 static char *idxd_name[] = {
 	"dsa",
+	"iax"
 };
 
 const char *idxd_get_dev_name(struct idxd_device *idxd)
@@ -53,6 +61,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 	struct idxd_irq_entry *irq_entry;
 	int i, msixcnt;
 	int rc = 0;
+	union msix_perm mperm;
 
 	msixcnt = pci_msix_vec_count(pdev);
 	if (msixcnt < 0) {
@@ -92,6 +101,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 	for (i = 0; i < msixcnt; i++) {
 		idxd->irq_entries[i].id = i;
 		idxd->irq_entries[i].idxd = idxd;
+		spin_lock_init(&idxd->irq_entries[i].list_lock);
 	}
 
 	msix = &idxd->msix_entries[0];
@@ -131,6 +141,13 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 
 	idxd_unmask_error_interrupts(idxd);
 
+	/* Setup MSIX permission table */
+	mperm.bits = 0;
+	mperm.pasid = idxd->pasid;
+	mperm.pasid_en = device_pasid_enabled(idxd);
+	for (i = 1; i < msixcnt; i++)
+		iowrite32(mperm.bits, idxd->reg_base + idxd->msix_perm_offset + i * 8);
+
 	return 0;
 
  err_no_irq:
@@ -201,17 +218,14 @@ static void idxd_read_table_offsets(struct idxd_device *idxd)
 	struct device *dev = &idxd->pdev->dev;
 
 	offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET);
-	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET
-			+ sizeof(u64));
-	idxd->grpcfg_offset = offsets.grpcfg * 0x100;
+	offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + sizeof(u64));
+	idxd->grpcfg_offset = offsets.grpcfg * IDXD_TABLE_MULT;
 	dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset);
-	idxd->wqcfg_offset = offsets.wqcfg * 0x100;
-	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n",
-		idxd->wqcfg_offset);
-	idxd->msix_perm_offset = offsets.msix_perm * 0x100;
-	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n",
-		idxd->msix_perm_offset);
-	idxd->perfmon_offset = offsets.perfmon * 0x100;
+	idxd->wqcfg_offset = offsets.wqcfg * IDXD_TABLE_MULT;
+	dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", idxd->wqcfg_offset);
+	idxd->msix_perm_offset = offsets.msix_perm * IDXD_TABLE_MULT;
+	dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", idxd->msix_perm_offset);
+	idxd->perfmon_offset = offsets.perfmon * IDXD_TABLE_MULT;
 	dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset);
 }
 
@@ -265,8 +279,7 @@ static void idxd_read_caps(struct idxd_device *idxd)
 	}
 }
 
-static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
-				      void __iomem * const *iomap)
+static struct idxd_device *idxd_alloc(struct pci_dev *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct idxd_device *idxd;
@@ -276,12 +289,45 @@ static struct idxd_device *idxd_alloc(struct pci_dev *pdev,
 		return NULL;
 
 	idxd->pdev = pdev;
-	idxd->reg_base = iomap[IDXD_MMIO_BAR];
 	spin_lock_init(&idxd->dev_lock);
 
 	return idxd;
 }
 
+static int idxd_enable_system_pasid(struct idxd_device *idxd)
+{
+	int flags;
+	unsigned int pasid;
+	struct iommu_sva *sva;
+
+	flags = SVM_FLAG_SUPERVISOR_MODE;
+
+	sva = iommu_sva_bind_device(&idxd->pdev->dev, NULL, &flags);
+	if (IS_ERR(sva)) {
+		dev_warn(&idxd->pdev->dev,
+			 "iommu sva bind failed: %ld\n", PTR_ERR(sva));
+		return PTR_ERR(sva);
+	}
+
+	pasid = iommu_sva_get_pasid(sva);
+	if (pasid == IOMMU_PASID_INVALID) {
+		iommu_sva_unbind_device(sva);
+		return -ENODEV;
+	}
+
+	idxd->sva = sva;
+	idxd->pasid = pasid;
+	dev_dbg(&idxd->pdev->dev, "system pasid: %u\n", pasid);
+	return 0;
+}
+
+static void idxd_disable_system_pasid(struct idxd_device *idxd)
+{
+
+	iommu_sva_unbind_device(idxd->sva);
+	idxd->sva = NULL;
+}
+
 static int idxd_probe(struct idxd_device *idxd)
 {
 	struct pci_dev *pdev = idxd->pdev;
@@ -292,6 +338,14 @@ static int idxd_probe(struct idxd_device *idxd)
 	idxd_device_init_reset(idxd);
 	dev_dbg(dev, "IDXD reset complete\n");
 
+	if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
+		rc = idxd_enable_system_pasid(idxd);
+		if (rc < 0)
+			dev_warn(dev, "Failed to enable PASID. No SVA support: %d\n", rc);
+		else
+			set_bit(IDXD_FLAG_PASID_ENABLED, &idxd->flags);
+	}
+
 	idxd_read_caps(idxd);
 	idxd_read_table_offsets(idxd);
 
@@ -322,29 +376,37 @@ static int idxd_probe(struct idxd_device *idxd)
 	idxd_mask_error_interrupts(idxd);
 	idxd_mask_msix_vectors(idxd);
  err_setup:
+	if (device_pasid_enabled(idxd))
+		idxd_disable_system_pasid(idxd);
 	return rc;
 }
 
+static void idxd_type_init(struct idxd_device *idxd)
+{
+	if (idxd->type == IDXD_TYPE_DSA)
+		idxd->compl_size = sizeof(struct dsa_completion_record);
+	else if (idxd->type == IDXD_TYPE_IAX)
+		idxd->compl_size = sizeof(struct iax_completion_record);
+}
+
 static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-	void __iomem * const *iomap;
 	struct device *dev = &pdev->dev;
 	struct idxd_device *idxd;
 	int rc;
-	unsigned int mask;
 
 	rc = pcim_enable_device(pdev);
 	if (rc)
 		return rc;
 
-	dev_dbg(dev, "Mapping BARs\n");
-	mask = (1 << IDXD_MMIO_BAR);
-	rc = pcim_iomap_regions(pdev, mask, DRV_NAME);
-	if (rc)
-		return rc;
+	dev_dbg(dev, "Alloc IDXD context\n");
+	idxd = idxd_alloc(pdev);
+	if (!idxd)
+		return -ENOMEM;
 
-	iomap = pcim_iomap_table(pdev);
-	if (!iomap)
+	dev_dbg(dev, "Mapping BARs\n");
+	idxd->reg_base = pcim_iomap(pdev, IDXD_MMIO_BAR, 0);
+	if (!idxd->reg_base)
 		return -ENOMEM;
 
 	dev_dbg(dev, "Set DMA masks\n");
@@ -360,13 +422,10 @@ static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (rc)
 		return rc;
 
-	dev_dbg(dev, "Alloc IDXD context\n");
-	idxd = idxd_alloc(pdev, iomap);
-	if (!idxd)
-		return -ENOMEM;
-
 	idxd_set_type(idxd);
 
+	idxd_type_init(idxd);
+
 	dev_dbg(dev, "Set PCI master\n");
 	pci_set_master(pdev);
 	pci_set_drvdata(pdev, idxd);
@@ -452,6 +511,8 @@ static void idxd_remove(struct pci_dev *pdev)
 	dev_dbg(&pdev->dev, "%s called\n", __func__);
 	idxd_cleanup_sysfs(idxd);
 	idxd_shutdown(pdev);
+	if (device_pasid_enabled(idxd))
+		idxd_disable_system_pasid(idxd);
 	mutex_lock(&idxd_idr_lock);
 	idr_remove(&idxd_idrs[idxd->type], idxd->id);
 	mutex_unlock(&idxd_idr_lock);
@@ -470,7 +531,7 @@ static int __init idxd_init_module(void)
 	int err, i;
 
 	/*
-	 * If the CPU does not support write512, there's no point in
+	 * If the CPU does not support MOVDIR64B or ENQCMDS, there's no point in
 	 * enumerating the device. We can not utilize it.
 	 */
 	if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) {
@@ -478,8 +539,10 @@ static int __init idxd_init_module(void)
 		return -ENODEV;
 	}
 
-	pr_info("%s: Intel(R) Accelerator Devices Driver %s\n",
-		DRV_NAME, IDXD_DRIVER_VERSION);
+	if (!boot_cpu_has(X86_FEATURE_ENQCMD))
+		pr_warn("Platform does not have ENQCMD(S) support.\n");
+	else
+		support_enqcmd = true;
 
 	mutex_init(&idxd_idr_lock);
 	for (i = 0; i < IDXD_TYPE_MAX; i++)
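For readers unfamiliar with the SVA calls this diff introduces, idxd_enable_system_pasid() above follows the generic kernel pattern for obtaining a supervisor (in-kernel) PASID: bind the device to the IOMMU with SVM_FLAG_SUPERVISOR_MODE and a NULL mm, read the PASID back from the returned handle, and unbind on teardown. The sketch below restates that pattern in isolation as a minimal illustration; the my_dev_* names are hypothetical and not part of this commit, and it assumes a kernel of roughly this vintage (v5.10/v5.11), where iommu_sva_bind_device() still takes a drvdata/flags pointer and SVM_FLAG_SUPERVISOR_MODE exists.

```c
/*
 * Hedged sketch of the supervisor-PASID enable/disable pattern used by
 * idxd_enable_system_pasid()/idxd_disable_system_pasid() in the diff above.
 * The my_dev_* names are illustrative only; the iommu_sva_* calls,
 * IOMMU_PASID_INVALID and SVM_FLAG_SUPERVISOR_MODE are the kernel APIs of
 * this era.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/intel-svm.h>

struct my_dev {
	struct device *dev;	/* underlying PCI/platform device */
	struct iommu_sva *sva;	/* handle returned by the SVA bind */
	unsigned int pasid;	/* PASID to program into the hardware */
};

static int my_dev_enable_system_pasid(struct my_dev *md)
{
	int flags = SVM_FLAG_SUPERVISOR_MODE;	/* kernel PASID, not a user mm */
	struct iommu_sva *sva;
	unsigned int pasid;

	/* mm == NULL: bind for in-kernel (supervisor) DMA use */
	sva = iommu_sva_bind_device(md->dev, NULL, &flags);
	if (IS_ERR(sva))
		return PTR_ERR(sva);

	/* The PASID is read back from the handle, not chosen by the driver */
	pasid = iommu_sva_get_pasid(sva);
	if (pasid == IOMMU_PASID_INVALID) {
		iommu_sva_unbind_device(sva);
		return -ENODEV;
	}

	md->sva = sva;
	md->pasid = pasid;
	return 0;
}

static void my_dev_disable_system_pasid(struct my_dev *md)
{
	iommu_sva_unbind_device(md->sva);
	md->sva = NULL;
}
```

Once a PASID is obtained this way, the driver still has to program it into the hardware; that is what the mperm.pasid/pasid_en writes to the MSIX permission table in idxd_setup_interrupts() above accomplish for this device.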