author    Robin Murphy <robin.murphy@arm.com>	2021-08-11 14:21:38 +0200
committer Joerg Roedel <jroedel@suse.de>	2021-08-18 13:27:49 +0200
commit    452e69b58c2889e5546edb92d9e66285410f7463 (patch)
tree      ba5049ab1aab6991887ab1b1b8baf141f8509e14 /drivers/iommu/dma-iommu.c
parent    iommu: Merge strictness and domain type configs (diff)
iommu: Allow enabling non-strict mode dynamically
Allocating and enabling a flush queue is in fact something we can reasonably do while a DMA domain is active, without having to rebuild it from scratch. Thus we can allow a strict -> non-strict transition from sysfs without needing to unbind the device's driver, which is of particular interest to users who want to make selective relaxations to critical devices like the one serving their root filesystem.

Disabling and draining a queue also seems technically possible to achieve without rebuilding the whole domain, but would certainly be more involved. Furthermore, there's not such a clear use-case for tightening up security *after* the device may already have done whatever it is that you don't trust it not to do, so we only consider the relaxation case.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/d652966348c78457c38bf18daf369272a4ebc2c9.1628682049.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
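The helper added below, iommu_dma_init_fq(), is what makes this possible: the sysfs path can bring a flush queue up on the live default domain first, and only then advertise the new domain type. The following is a minimal sketch of that caller side, assuming the shape of the core-IOMMU change that accompanies this patch in drivers/iommu/iommu.c (not shown in this diffstat-limited view); the function name example_relax_to_fq is purely illustrative.

/*
 * Illustrative sketch only: how a sysfs "type" store handler could use
 * iommu_dma_init_fq() to relax a live strict (DMA) default domain to
 * non-strict (DMA-FQ) without unbinding the device's driver.
 */
static int example_relax_to_fq(struct iommu_domain *dom, unsigned int new_type)
{
	int ret;

	/* Only the strict -> non-strict direction is handled dynamically */
	if (new_type != IOMMU_DOMAIN_DMA_FQ || dom->type != IOMMU_DOMAIN_DMA)
		return -EINVAL;

	/* Bring the flush queue up on the live domain... */
	ret = iommu_dma_init_fq(dom);
	if (ret)
		return ret;

	/* ...and only flip the type once the queue is fully initialised */
	dom->type = IOMMU_DOMAIN_DMA_FQ;
	return 0;
}

In practice the transition is driven from userspace by writing DMA-FQ to the group's type attribute under /sys/kernel/iommu_groups/ while the device stays bound; if iommu_dma_init_fq() fails, the domain simply stays in strict mode.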
Diffstat (limited to 'drivers/iommu/dma-iommu.c')
-rw-r--r--	drivers/iommu/dma-iommu.c	47
1 file changed, 32 insertions(+), 15 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index b7ae855c1e89..bac7370ead3e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -317,6 +317,30 @@ static bool dev_is_untrusted(struct device *dev)
 	return dev_is_pci(dev) && to_pci_dev(dev)->untrusted;
 }
 
+/* sysfs updates are serialised by the mutex of the group owning @domain */
+int iommu_dma_init_fq(struct iommu_domain *domain)
+{
+	struct iommu_dma_cookie *cookie = domain->iova_cookie;
+	int ret;
+
+	if (cookie->fq_domain)
+		return 0;
+
+	ret = init_iova_flush_queue(&cookie->iovad, iommu_dma_flush_iotlb_all,
+				    iommu_dma_entry_dtor);
+	if (ret) {
+		pr_warn("iova flush queue initialization failed\n");
+		return ret;
+	}
+	/*
+	 * Prevent incomplete iovad->fq being observable. Pairs with path from
+	 * __iommu_dma_unmap() through iommu_dma_free_iova() to queue_iova()
+	 */
+	smp_wmb();
+	WRITE_ONCE(cookie->fq_domain, domain);
+	return 0;
+}
+
 /**
  * iommu_dma_init_domain - Initialise a DMA mapping domain
  * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
@@ -371,15 +395,8 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 	init_iova_domain(iovad, 1UL << order, base_pfn);
 
 	/* If the FQ fails we can simply fall back to strict mode */
-	if (domain->type == IOMMU_DOMAIN_DMA_FQ && !cookie->fq_domain) {
-		if (init_iova_flush_queue(iovad, iommu_dma_flush_iotlb_all,
-					  iommu_dma_entry_dtor)) {
-			pr_warn("iova flush queue initialization failed\n");
-			domain->type = IOMMU_DOMAIN_DMA;
-		} else {
-			cookie->fq_domain = domain;
-		}
-	}
+	if (domain->type == IOMMU_DOMAIN_DMA_FQ && iommu_dma_init_fq(domain))
+		domain->type = IOMMU_DOMAIN_DMA;
 
 	return iova_reserve_iommu_regions(dev, domain);
 }
@@ -454,17 +471,17 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 }
 
 static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
-		dma_addr_t iova, size_t size, struct page *freelist)
+		dma_addr_t iova, size_t size, struct iommu_iotlb_gather *gather)
 {
 	struct iova_domain *iovad = &cookie->iovad;
 
 	/* The MSI case is only ever cleaning up its most recent allocation */
 	if (cookie->type == IOMMU_DMA_MSI_COOKIE)
 		cookie->msi_iova -= size;
-	else if (cookie->fq_domain)	/* non-strict mode */
+	else if (gather && gather->queued)
 		queue_iova(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad),
-				(unsigned long)freelist);
+				(unsigned long)gather->freelist);
 	else
 		free_iova_fast(iovad, iova_pfn(iovad, iova),
 				size >> iova_shift(iovad));
@@ -483,14 +500,14 @@ static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 	dma_addr -= iova_off;
 	size = iova_align(iovad, size + iova_off);
 	iommu_iotlb_gather_init(&iotlb_gather);
-	iotlb_gather.queued = cookie->fq_domain;
+	iotlb_gather.queued = READ_ONCE(cookie->fq_domain);
 
 	unmapped = iommu_unmap_fast(domain, dma_addr, size, &iotlb_gather);
 	WARN_ON(unmapped != size);
 
-	if (!cookie->fq_domain)
+	if (!iotlb_gather.queued)
 		iommu_iotlb_sync(domain, &iotlb_gather);
-	iommu_dma_free_iova(cookie, dma_addr, size, iotlb_gather.freelist);
+	iommu_dma_free_iova(cookie, dma_addr, size, &iotlb_gather);
 }
 
 static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,