Diffstat (limited to 'drivers/iommu/intel')
-rw-r--r--	drivers/iommu/intel/Kconfig | 19
-rw-r--r--	drivers/iommu/intel/dmar.c  |  2
-rw-r--r--	drivers/iommu/intel/iommu.c | 58
-rw-r--r--	drivers/iommu/intel/pasid.c | 28
-rw-r--r--	drivers/iommu/intel/pasid.h | 16
-rw-r--r--	drivers/iommu/intel/perf.c  |  2
-rw-r--r--	drivers/iommu/intel/svm.c   |  7
7 files changed, 67 insertions(+), 65 deletions(-)
diff --git a/drivers/iommu/intel/Kconfig b/drivers/iommu/intel/Kconfig
index 43ebd8af11c5..0ddb77115be7 100644
--- a/drivers/iommu/intel/Kconfig
+++ b/drivers/iommu/intel/Kconfig
@@ -25,9 +25,11 @@ config INTEL_IOMMU
	  and include PCI device scope covered by these DMA
	  remapping devices.
 
+if INTEL_IOMMU
+
 config INTEL_IOMMU_DEBUGFS
 	bool "Export Intel IOMMU internals in Debugfs"
-	depends on INTEL_IOMMU && IOMMU_DEBUGFS
+	depends on IOMMU_DEBUGFS
 	select DMAR_PERF
 	help
 	  !!!WARNING!!!
@@ -41,7 +43,7 @@ config INTEL_IOMMU_DEBUGFS
 
 config INTEL_IOMMU_SVM
 	bool "Support for Shared Virtual Memory with Intel IOMMU"
-	depends on INTEL_IOMMU && X86_64
+	depends on X86_64
 	select PCI_PASID
 	select PCI_PRI
 	select MMU_NOTIFIER
@@ -53,9 +55,8 @@ config INTEL_IOMMU_SVM
 	  means of a Process Address Space ID (PASID).
 
 config INTEL_IOMMU_DEFAULT_ON
-	def_bool y
-	prompt "Enable Intel DMA Remapping Devices by default"
-	depends on INTEL_IOMMU
+	bool "Enable Intel DMA Remapping Devices by default"
+	default y
 	help
 	  Selecting this option will enable a DMAR device at boot time if
 	  one is found. If this option is not selected, DMAR support can
@@ -63,7 +64,7 @@ config INTEL_IOMMU_DEFAULT_ON
 
 config INTEL_IOMMU_BROKEN_GFX_WA
 	bool "Workaround broken graphics drivers (going away soon)"
-	depends on INTEL_IOMMU && BROKEN && X86
+	depends on BROKEN && X86
 	help
 	  Current Graphics drivers tend to use physical address
 	  for DMA and avoid using DMA APIs. Setting this config
@@ -74,7 +75,7 @@ config INTEL_IOMMU_BROKEN_GFX_WA
 
 config INTEL_IOMMU_FLOPPY_WA
 	def_bool y
-	depends on INTEL_IOMMU && X86
+	depends on X86
 	help
 	  Floppy disk drivers are known to bypass DMA API calls
 	  thereby failing to work when IOMMU is enabled. This
@@ -83,7 +84,7 @@ config INTEL_IOMMU_FLOPPY_WA
 
 config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
 	bool "Enable Intel IOMMU scalable mode by default"
-	depends on INTEL_IOMMU
+	default y
 	help
 	  Selecting this option will enable by default the scalable mode if
 	  hardware presents the capability. The scalable mode is defined in
@@ -92,3 +93,5 @@ config INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
 	  is not selected, scalable mode support could also be enabled by
 	  passing intel_iommu=sm_on to the kernel. If not sure, please use
 	  the default value.
+
+endif # INTEL_IOMMU
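The Kconfig hunks are mechanical: the new "if INTEL_IOMMU" / "endif" block makes every entry between them depend on INTEL_IOMMU implicitly, so each per-entry "depends on INTEL_IOMMU" clause can be dropped. On the C side, the two default options are consumed through IS_ENABLED() rather than #ifdef blocks (see the iommu.c hunk below). A minimal sketch of that idiom, using a hypothetical CONFIG_EXAMPLE_DEFAULT_ON symbol in place of the real options:

#include <linux/kconfig.h>	/* IS_ENABLED() */

/*
 * IS_ENABLED(CONFIG_FOO) expands to 1 when CONFIG_FOO is set to y or m
 * and to 0 otherwise, so a config-dependent default collapses into a
 * plain initializer instead of an #ifdef/#else/#endif block.
 * CONFIG_EXAMPLE_DEFAULT_ON is a made-up symbol for illustration only.
 */
static int example_disabled = !IS_ENABLED(CONFIG_EXAMPLE_DEFAULT_ON);

Because IS_ENABLED() expands to a constant 0 or 1, the initializer stays visible to the compiler in every configuration, which is why kernel style prefers it over preprocessor conditionals where possible.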
diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
index d66f79acd14d..0ec5514c9980 100644
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -149,8 +149,6 @@ dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
 	} else {
 		info = kzalloc(size, GFP_KERNEL);
 		if (!info) {
-			pr_warn("Out of memory when allocating notify_info "
-				"for %s.\n", pci_name(dev));
 			if (dmar_dev_scope_status == 0)
 				dmar_dev_scope_status = -ENOMEM;
 			return NULL;
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index c12cc955389a..d75f59ae28e6 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -33,6 +33,7 @@
 #include <linux/iommu.h>
 #include <linux/dma-iommu.h>
 #include <linux/intel-iommu.h>
+#include <linux/intel-svm.h>
 #include <linux/syscore_ops.h>
 #include <linux/tboot.h>
 #include <linux/dmi.h>
@@ -327,17 +328,8 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 					    dma_addr_t iova);
 
-#ifdef CONFIG_INTEL_IOMMU_DEFAULT_ON
-int dmar_disabled = 0;
-#else
-int dmar_disabled = 1;
-#endif /* CONFIG_INTEL_IOMMU_DEFAULT_ON */
-
-#ifdef CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON
-int intel_iommu_sm = 1;
-#else
-int intel_iommu_sm;
-#endif /* CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON */
+int dmar_disabled = !IS_ENABLED(CONFIG_INTEL_IOMMU_DEFAULT_ON);
+int intel_iommu_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);
 
 int intel_iommu_enabled = 0;
 EXPORT_SYMBOL_GPL(intel_iommu_enabled);
@@ -441,8 +433,11 @@ static int __init intel_iommu_setup(char *str)
 			pr_info("Disable supported super page\n");
 			intel_iommu_superpage = 0;
 		} else if (!strncmp(str, "sm_on", 5)) {
-			pr_info("Intel-IOMMU: scalable mode supported\n");
+			pr_info("Enable scalable mode if hardware supports\n");
 			intel_iommu_sm = 1;
+		} else if (!strncmp(str, "sm_off", 6)) {
+			pr_info("Scalable mode is disallowed\n");
+			intel_iommu_sm = 0;
 		} else if (!strncmp(str, "tboot_noforce", 13)) {
 			pr_info("Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
 			intel_iommu_tboot_noforce = 1;
@@ -582,7 +577,7 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 	int iommu_id;
 
 	/* si_domain and vm domain should not get here. */
-	if (WARN_ON(domain->domain.type != IOMMU_DOMAIN_DMA))
+	if (WARN_ON(!iommu_is_dma_domain(&domain->domain)))
 		return NULL;
 
 	for_each_domain_iommu(iommu_id, domain)
@@ -1034,7 +1029,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 			if (domain_use_first_level(domain)) {
 				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
-				if (domain->domain.type == IOMMU_DOMAIN_DMA)
+				if (iommu_is_dma_domain(&domain->domain))
 					pteval |= DMA_FL_PTE_ACCESS;
 			}
 			if (cmpxchg64(&pte->val, 0ULL, pteval))
@@ -1547,7 +1542,7 @@ static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 	if (info->pri_supported &&
 	    (info->pasid_enabled ? pci_prg_resp_pasid_required(pdev) : 1)  &&
-	    !pci_reset_pri(pdev) && !pci_enable_pri(pdev, 32))
+	    !pci_reset_pri(pdev) && !pci_enable_pri(pdev, PRQ_DEPTH))
 		info->pri_enabled = 1;
 #endif
 	if (info->ats_supported && pci_ats_page_aligned(pdev) &&
@@ -1779,11 +1774,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	spin_lock_init(&iommu->lock);
 
 	iommu->domain_ids = kcalloc(nlongs, sizeof(unsigned long), GFP_KERNEL);
-	if (!iommu->domain_ids) {
-		pr_err("%s: Allocating domain id array failed\n",
-		       iommu->name);
+	if (!iommu->domain_ids)
 		return -ENOMEM;
-	}
 
 	size = (ALIGN(ndomains, 256) >> 8) * sizeof(struct dmar_domain **);
 	iommu->domains = kzalloc(size, GFP_KERNEL);
@@ -1979,10 +1971,6 @@ static void domain_exit(struct dmar_domain *domain)
 	/* Remove associated devices and clear attached or cached domains */
 	domain_remove_dev_info(domain);
 
-	/* destroy iovas */
-	if (domain->domain.type == IOMMU_DOMAIN_DMA)
-		iommu_put_dma_cookie(&domain->domain);
-
 	if (domain->pgd) {
 		struct page *freelist;
@@ -2347,13 +2335,9 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
 	attr |= DMA_FL_PTE_PRESENT;
 	if (domain_use_first_level(domain)) {
-		attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
-
-		if (domain->domain.type == IOMMU_DOMAIN_DMA) {
-			attr |= DMA_FL_PTE_ACCESS;
-			if (prot & DMA_PTE_WRITE)
-				attr |= DMA_FL_PTE_DIRTY;
-		}
+		attr |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
+		if (prot & DMA_PTE_WRITE)
+			attr |= DMA_FL_PTE_DIRTY;
 	}
 
 	pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | attr;
@@ -3228,7 +3212,6 @@ static int __init init_dmars(void)
 	g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
 			GFP_KERNEL);
 	if (!g_iommus) {
-		pr_err("Allocating global iommu array failed\n");
 		ret = -ENOMEM;
 		goto error;
 	}
@@ -4532,6 +4515,7 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 	switch (type) {
 	case IOMMU_DOMAIN_DMA:
+	case IOMMU_DOMAIN_DMA_FQ:
 	case IOMMU_DOMAIN_UNMANAGED:
 		dmar_domain = alloc_domain(0);
 		if (!dmar_domain) {
@@ -4544,10 +4528,6 @@ static struct iommu_domain *intel_iommu_domain_alloc(unsigned type)
 			return NULL;
 		}
 
-		if (type == IOMMU_DOMAIN_DMA &&
-		    iommu_get_dma_cookie(&dmar_domain->domain))
-			return NULL;
-
 		domain = &dmar_domain->domain;
 		domain->geometry.aperture_start = 0;
 		domain->geometry.aperture_end   =
@@ -5205,12 +5185,8 @@ static void intel_iommu_release_device(struct device *dev)
 
 static void intel_iommu_probe_finalize(struct device *dev)
 {
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-
-	if (domain && domain->type == IOMMU_DOMAIN_DMA)
-		iommu_setup_dma_ops(dev, 0, U64_MAX);
-	else
-		set_dma_ops(dev, NULL);
+	set_dma_ops(dev, NULL);
+	iommu_setup_dma_ops(dev, 0, U64_MAX);
 }
 
 static void intel_iommu_get_resv_regions(struct device *device,
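Three themes run through the iommu.c hunks. The PRI queue depth now comes from the PRQ_DEPTH constant instead of a bare 32, the constants having evidently moved to <linux/intel-svm.h>, which iommu.c now includes (the matching svm.c removals appear below). intel_iommu_domain_alloc() accepts IOMMU_DOMAIN_DMA_FQ, and dma-iommu cookie management plus DMA-ops wiring move out of the driver into the common path invoked from probe_finalize(). And the boot parameter gains an sm_off token so scalable mode can be turned off now that the Kconfig default enables it. A sketch of that token-parsing pattern (simplified; example_iommu= and example_sm are made up for illustration, not the driver's actual parser):

#include <linux/init.h>
#include <linux/kconfig.h>
#include <linux/string.h>

static int example_sm = IS_ENABLED(CONFIG_INTEL_IOMMU_SCALABLE_MODE_DEFAULT_ON);

/* Walk a comma-separated option string, mirroring intel_iommu_setup(). */
static int __init example_setup(char *str)
{
	while (str) {
		if (!strncmp(str, "sm_on", 5))
			example_sm = 1;	/* opt in on default-off builds */
		else if (!strncmp(str, "sm_off", 6))
			example_sm = 0;	/* opt out on default-on builds */
		str = strchr(str, ',');
		if (str)
			str++;		/* step past the comma */
	}
	return 1;			/* option consumed */
}
__setup("example_iommu=", example_setup);

The order of the two tests does not matter ("sm_off" fails the five-character comparison against "sm_on" at the fifth byte), but matching with explicit lengths keeps prefix tokens from shadowing longer ones.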
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index c6cf44a6c923..07c390aed1fe 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -511,29 +511,39 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
 			 u32 pasid, bool fault_ignore)
 {
 	struct pasid_entry *pte;
-	u16 did;
+	u16 did, pgtt;
 
 	pte = intel_pasid_get_entry(dev, pasid);
 	if (WARN_ON(!pte))
 		return;
 
-	if (!(pte->val[0] & PASID_PTE_PRESENT))
+	if (!pasid_pte_is_present(pte))
 		return;
 
 	did = pasid_get_domain_id(pte);
+	pgtt = pasid_pte_get_pgtt(pte);
+
 	intel_pasid_clear_entry(dev, pasid, fault_ignore);
 
 	if (!ecap_coherent(iommu->ecap))
 		clflush_cache_range(pte, sizeof(*pte));
 
 	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
-	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+
+	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
+		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
+	else
+		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
 
 	/* Device IOTLB doesn't need to be flushed in caching mode. */
 	if (!cap_caching_mode(iommu->cap))
 		devtlb_invalidation_with_pasid(iommu, dev, pasid);
 }
 
+/*
+ * This function flushes cache for a newly setup pasid table entry.
+ * Caller of it should not modify the in-use pasid table entries.
+ */
 static void pasid_flush_caches(struct intel_iommu *iommu,
 			       struct pasid_entry *pte,
 			       u32 pasid, u16 did)
@@ -585,6 +595,10 @@ int intel_pasid_setup_first_level(struct intel_iommu *iommu,
 	if (WARN_ON(!pte))
 		return -EINVAL;
 
+	/* Caller must ensure PASID entry is not in use. */
+	if (pasid_pte_is_present(pte))
+		return -EBUSY;
+
 	pasid_clear_entry(pte);
 
 	/* Setup the first level page table pointer: */
@@ -684,6 +698,10 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu,
 		return -ENODEV;
 	}
 
+	/* Caller must ensure PASID entry is not in use. */
+	if (pasid_pte_is_present(pte))
+		return -EBUSY;
+
 	pasid_clear_entry(pte);
 	pasid_set_domain_id(pte, did);
 	pasid_set_slptr(pte, pgd_val);
@@ -723,6 +741,10 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
 		return -ENODEV;
 	}
 
+	/* Caller must ensure PASID entry is not in use. */
+	if (pasid_pte_is_present(pte))
+		return -EBUSY;
+
 	pasid_clear_entry(pte);
 	pasid_set_domain_id(pte, did);
 	pasid_set_address_width(pte, iommu->agaw);
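Two invariants are tightened in pasid.c. The three setup paths (first-level, second-level, pass-through) now return -EBUSY when the PASID entry is still present, so callers must tear an entry down before reprogramming it. And teardown flushes according to the PGTT (page-table type) field of the old entry: PASID-selective PIOTLB invalidation applies only to entries that used first-level or pass-through translation, while second-level and nested entries get a domain-selective IOTLB flush. A userspace sketch of the bit extraction behind the new pasid_pte_get_pgtt() helper (added in the pasid.h hunk below); the PGTT encoding values are taken from the PASID_ENTRY_PGTT_* constants in pasid.h, which are not shown in this diff and are an assumption here:

#include <stdint.h>
#include <stdio.h>

#define PGTT_FL_ONLY	1	/* first-level only */
#define PGTT_SL_ONLY	2	/* second-level only */
#define PGTT_NESTED	3	/* nested translation */
#define PGTT_PT		4	/* pass-through */

/* PGTT lives in bits 8:6 of the low quadword of a PASID entry. */
static uint16_t pgtt_of(uint64_t val0)
{
	return (uint16_t)((val0 >> 6) & 0x7);
}

int main(void)
{
	/* Hypothetical present entry in pass-through mode. */
	uint64_t val0 = ((uint64_t)PGTT_PT << 6) | 1 /* P bit */;
	uint16_t pgtt = pgtt_of(val0);

	if (pgtt == PGTT_PT || pgtt == PGTT_FL_ONLY)
		puts("flush: PASID-selective PIOTLB");
	else
		puts("flush: domain-selective IOTLB");
	return 0;
}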
diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
index 5ff61c3d401f..d5552e2c160d 100644
--- a/drivers/iommu/intel/pasid.h
+++ b/drivers/iommu/intel/pasid.h
@@ -28,12 +28,12 @@
 #define VCMD_CMD_ALLOC			0x1
 #define VCMD_CMD_FREE			0x2
 #define VCMD_VRSP_IP			0x1
-#define VCMD_VRSP_SC(e)			(((e) >> 1) & 0x3)
+#define VCMD_VRSP_SC(e)			(((e) & 0xff) >> 1)
 #define VCMD_VRSP_SC_SUCCESS		0
-#define VCMD_VRSP_SC_NO_PASID_AVAIL	2
-#define VCMD_VRSP_SC_INVALID_PASID	2
-#define VCMD_VRSP_RESULT_PASID(e)	(((e) >> 8) & 0xfffff)
-#define VCMD_CMD_OPERAND(e)		((e) << 8)
+#define VCMD_VRSP_SC_NO_PASID_AVAIL	16
+#define VCMD_VRSP_SC_INVALID_PASID	16
+#define VCMD_VRSP_RESULT_PASID(e)	(((e) >> 16) & 0xfffff)
+#define VCMD_CMD_OPERAND(e)		((e) << 16)
 /*
  * Domain ID reserved for pasid entries programmed for first-level
  * only and pass-through transfer modes.
@@ -99,6 +99,12 @@ static inline bool pasid_pte_is_present(struct pasid_entry *pte)
 	return READ_ONCE(pte->val[0]) & PASID_PTE_PRESENT;
 }
 
+/* Get PGTT field of a PASID table entry */
+static inline u16 pasid_pte_get_pgtt(struct pasid_entry *pte)
+{
+	return (u16)((READ_ONCE(pte->val[0]) >> 6) & 0x7);
+}
+
 extern unsigned int intel_pasid_max_id;
 int intel_pasid_alloc_table(struct device *dev);
 void intel_pasid_free_table(struct device *dev);
diff --git a/drivers/iommu/intel/perf.c b/drivers/iommu/intel/perf.c
index 73b7ec705552..0e8e03252d92 100644
--- a/drivers/iommu/intel/perf.c
+++ b/drivers/iommu/intel/perf.c
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * perf.c - performance monitor
  *
  * Copyright (C) 2021 Intel Corporation
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
index 9b0f22bc0514..2014fe8695ac 100644
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -31,8 +31,6 @@ static irqreturn_t prq_event_thread(int irq, void *d);
 static void intel_svm_drain_prq(struct device *dev, u32 pasid);
 #define to_intel_svm_dev(handle) container_of(handle, struct intel_svm_dev, sva)
 
-#define PRQ_ORDER 0
-
 static DEFINE_XARRAY_ALLOC(pasid_private_array);
 static int pasid_private_add(ioasid_t pasid, void *priv)
 {
@@ -675,7 +673,6 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
 			kfree_rcu(sdev, rcu);
 
 			if (list_empty(&svm->devs)) {
-				intel_svm_free_pasid(mm);
 				if (svm->notifier.ops) {
 					mmu_notifier_unregister(&svm->notifier, mm);
 					/* Clear mm's pasid. */
@@ -690,6 +687,8 @@ static int intel_svm_unbind_mm(struct device *dev, u32 pasid)
 				kfree(svm);
 			}
 		}
+		/* Drop a PASID reference and free it if no reference. */
+		intel_svm_free_pasid(mm);
 	}
 out:
 	return ret;
@@ -724,8 +723,6 @@ struct page_req_dsc {
 	u64 priv_data[2];
 };
 
-#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
-
 static bool is_canonical_address(u64 addr)
 {
 	int shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
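The VCMD macro changes in pasid.h fix the register layout used for virtual-command PASID allocation: the status code occupies bits 7:1 of the response rather than 2:1, the two error codes are 16 rather than 2, and both the command operand and the returned PASID sit at bit 16 rather than bit 8. Assuming the new layout reflects the hardware (which the fix implies), the old macros decoded both fields from bit positions the hardware never writes. The svm.c removals are the flip side of the PRQ_DEPTH change above: PRQ_ORDER and PRQ_RING_MASK drop out of svm.c along with intel-svm.h taking over the PRQ constants. A stand-alone round-trip check of the corrected field macros (userspace illustration; the kernel macros are copied with a uint64_t cast added for a clean build, and the operand and result-PASID fields share bit position 16, so encode-then-decode must be the identity):

#include <assert.h>
#include <stdint.h>

/* Field accessors from the updated pasid.h, cast added for userspace. */
#define VCMD_VRSP_SC(e)			(((e) & 0xff) >> 1)
#define VCMD_VRSP_RESULT_PASID(e)	(((e) >> 16) & 0xfffff)
#define VCMD_CMD_OPERAND(e)		((uint64_t)(e) << 16)

int main(void)
{
	uint32_t pasid = 0xabcde;	/* arbitrary 20-bit PASID */
	uint64_t reg = VCMD_CMD_OPERAND(pasid);

	assert(VCMD_VRSP_RESULT_PASID(reg) == pasid);
	assert(VCMD_VRSP_SC(reg) == 0);	/* low byte untouched: success */
	return 0;
}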