author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-02-08 21:03:54 +0100 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-02-08 21:03:54 +0100 |
commit | ef9417e8a903d3a68a83ea2da32f1db030341c37 (patch) | |
tree | 731162b8b3b26707ae98ab40830433951df52ca1 /drivers/iommu/intel-svm.c | |
parent | Merge branch 'pcmcia' of git://git.kernel.org/pub/scm/linux/kernel/git/brodo/... (diff) | |
parent | Merge branches 'arm/renesas', 'arm/omap', 'arm/exynos', 'x86/amd', 'x86/vt-d'... (diff) | |
Merge tag 'iommu-updates-v4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu
Pull IOMMU updates from Joerg Roedel:
"This time there are not a lot of changes coming from the IOMMU side.
That is partly because I returned from my parental leave late in the
development process and probably partly because everyone was busy with
Spectre and Meltdown mitigation work and didn't find the time for
IOMMU work. So here are the few changes that queued up for this merge
window:
- 5-level page-table support for the Intel IOMMU
- error reporting improvements for the AMD IOMMU driver
- additional DT bindings for ipmmu-vmsa (Renesas)
- small fixes and cleanups"
* tag 'iommu-updates-v4.16' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
iommu: Clean up of_iommu_init_fn
iommu/ipmmu-vmsa: Remove redundant of_iommu_init_fn hook
iommu/msm: Claim bus ops on probe
iommu/vt-d: Enable 5-level paging mode in the PASID entry
iommu/vt-d: Add a check for 5-level paging support
iommu/vt-d: Add a check for 1GB page support
iommu/vt-d: Enable upto 57 bits of domain address width
iommu/vt-d: Use domain instead of cache fetching
iommu/exynos: Don't unconditionally steal bus ops
iommu/omap: Fix debugfs_create_*() usage
iommu/vt-d: clean up pr_irq if request_threaded_irq fails
iommu: Check the result of iommu_group_get() for NULL
iommu/ipmmu-vmsa: Add r8a779(70|95) DT bindings
iommu/ipmmu-vmsa: Add r8a7796 DT binding
iommu/amd: Set the device table entry PPR bit for IOMMU V2 devices
iommu/amd - Record more information about unknown events
Diffstat (limited to 'drivers/iommu/intel-svm.c')
-rw-r--r-- | drivers/iommu/intel-svm.c | 32 |
1 file changed, 27 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 0a826eb7fe48..35a408d0ae4f 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -26,6 +26,10 @@
 #include <linux/interrupt.h>
 #include <asm/page.h>
 
+#define PASID_ENTRY_P		BIT_ULL(0)
+#define PASID_ENTRY_FLPM_5LP	BIT_ULL(9)
+#define PASID_ENTRY_SRE		BIT_ULL(11)
+
 static irqreturn_t prq_event_thread(int irq, void *d);
 
 struct pasid_entry {
@@ -41,6 +45,14 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
 	struct page *pages;
 	int order;
 
+	if (cpu_feature_enabled(X86_FEATURE_GBPAGES) &&
+			!cap_fl1gp_support(iommu->cap))
+		return -EINVAL;
+
+	if (cpu_feature_enabled(X86_FEATURE_LA57) &&
+			!cap_5lp_support(iommu->cap))
+		return -EINVAL;
+
 	/* Start at 2 because it's defined as 2^(1+PSS) */
 	iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
 
@@ -129,6 +141,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
 		pr_err("IOMMU: %s: Failed to request IRQ for page request queue\n",
 		       iommu->name);
 		dmar_free_hwirq(irq);
+		iommu->pr_irq = 0;
 		goto err;
 	}
 	dmar_writeq(iommu->reg + DMAR_PQH_REG, 0ULL);
@@ -144,9 +157,11 @@ int intel_svm_finish_prq(struct intel_iommu *iommu)
 	dmar_writeq(iommu->reg + DMAR_PQT_REG, 0ULL);
 	dmar_writeq(iommu->reg + DMAR_PQA_REG, 0ULL);
 
-	free_irq(iommu->pr_irq, iommu);
-	dmar_free_hwirq(iommu->pr_irq);
-	iommu->pr_irq = 0;
+	if (iommu->pr_irq) {
+		free_irq(iommu->pr_irq, iommu);
+		dmar_free_hwirq(iommu->pr_irq);
+		iommu->pr_irq = 0;
+	}
 
 	free_pages((unsigned long)iommu->prq, PRQ_ORDER);
 	iommu->prq = NULL;
@@ -290,6 +305,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 	struct intel_svm_dev *sdev;
 	struct intel_svm *svm = NULL;
 	struct mm_struct *mm = NULL;
+	u64 pasid_entry_val;
 	int pasid_max;
 	int ret;
 
@@ -396,9 +412,15 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 				kfree(sdev);
 				goto out;
 			}
-			iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
+			pasid_entry_val = (u64)__pa(mm->pgd) | PASID_ENTRY_P;
 		} else
-			iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
+			pasid_entry_val = (u64)__pa(init_mm.pgd) |
+					  PASID_ENTRY_P | PASID_ENTRY_SRE;
+		if (cpu_feature_enabled(X86_FEATURE_LA57))
+			pasid_entry_val |= PASID_ENTRY_FLPM_5LP;
+
+		iommu->pasid_table[svm->pasid].val = pasid_entry_val;
+
 		wmb();
 		/* In caching mode, we still have to flush with PASID 0 when
 		 * a PASID table entry becomes present. Not entirely clear
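
The net effect of the intel-svm.c hunks above is that the PASID table entry is now assembled from named bits (PASID_ENTRY_P, PASID_ENTRY_SRE, PASID_ENTRY_FLPM_5LP) instead of the magic constants 1 and (1ULL << 11), with the 5-level-paging bit set when the CPU uses 57-bit virtual addresses. The snippet below is a minimal, standalone userspace sketch of that composition only; it is not part of the patch, BIT_ULL is redefined locally, the bool parameters stand in for the kernel's cpu_feature_enabled() checks, and the pgd physical addresses are made-up stand-ins for __pa(mm->pgd) and __pa(init_mm.pgd).

	/* Standalone illustration of the PASID entry value built in the patch. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define BIT_ULL(n)		(1ULL << (n))
	#define PASID_ENTRY_P		BIT_ULL(0)	/* entry present */
	#define PASID_ENTRY_FLPM_5LP	BIT_ULL(9)	/* first-level uses 5-level paging */
	#define PASID_ENTRY_SRE		BIT_ULL(11)	/* supervisor requests enabled */

	static uint64_t pasid_entry_val(uint64_t pgd_pa, bool supervisor, bool la57)
	{
		/* Point the entry at the first-level page table and mark it present. */
		uint64_t val = pgd_pa | PASID_ENTRY_P;

		/* Kernel (init_mm) bindings additionally allow supervisor requests. */
		if (supervisor)
			val |= PASID_ENTRY_SRE;

		/*
		 * When the CPU runs with 57-bit virtual addresses (LA57),
		 * the IOMMU must walk the same 5-level page table.
		 */
		if (la57)
			val |= PASID_ENTRY_FLPM_5LP;

		return val;
	}

	int main(void)
	{
		printf("user PASID entry:   %#llx\n",
		       (unsigned long long)pasid_entry_val(0x1234000, false, true));
		printf("kernel PASID entry: %#llx\n",
		       (unsigned long long)pasid_entry_val(0x2000000, true, true));
		return 0;
	}

In the kernel code itself the la57 decision comes from cpu_feature_enabled(X86_FEATURE_LA57), and the computed value is stored into iommu->pasid_table[svm->pasid].val followed by wmb(), so the entry is globally visible before the flush that the surrounding code performs.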