author    Nadav Amit <namit@vmware.com>      2021-07-23 11:32:08 +0200
committer Joerg Roedel <jroedel@suse.de>     2021-08-02 11:26:06 +0200
commit    3b122a5666cb7c0bb9a439fba0c9a6cf59f999c3
tree      cb52667682da9101024d3be97229edf90d36295b /drivers/iommu
parent    iommu/amd: Tailored gather logic for AMD
iommu/amd: Sync once for scatter-gather operations
On virtual machines, software must flush the IOTLB after each page table
entry update.

The iommu_map_sg() code iterates through the given scatter-gather list
and invokes iommu_map() for each element in the list, which calls into
the vendor IOMMU driver through an iommu_ops callback. As a result, a
single sg mapping may lead to multiple IOTLB flushes.

Fix this by adding an amd_iommu_iotlb_sync_map() callback and flushing
there, once, after all sg mappings have been set.

This change follows and was inspired by commit 933fcd01e97e2
("iommu/vt-d: Add iotlb_sync_map callback").

Cc: Joerg Roedel <joro@8bytes.org>
Cc: Will Deacon <will@kernel.org>
Cc: Jiajun Cao <caojiajun@vmware.com>
Cc: Robin Murphy <robin.murphy@arm.com>
Cc: Lu Baolu <baolu.lu@linux.intel.com>
Cc: iommu@lists.linux-foundation.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Nadav Amit <namit@vmware.com>
Link: https://lore.kernel.org/r/20210723093209.714328-7-namit@vmware.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
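For context, the single flush is driven from the generic IOMMU core:
the sg-mapping path maps every scatter-gather element first and only
then invokes the driver's iotlb_sync_map callback. Below is a condensed
sketch of that core path, simplified from the drivers/iommu/iommu.c of
this era; contiguous-segment merging and the unwind of partial mappings
are elided, so treat it as approximate rather than the exact upstream
code:

static ssize_t __iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
			      struct scatterlist *sg, unsigned int nents,
			      int prot, gfp_t gfp)
{
	const struct iommu_ops *ops = domain->ops;
	struct scatterlist *s;
	size_t mapped = 0;
	unsigned int i;
	int ret;

	/* Map each sg element; no per-element IOTLB flush happens here. */
	for_each_sg(sg, s, nents, i) {
		ret = __iommu_map(domain, iova + mapped, sg_phys(s),
				  s->length, prot, gfp);
		if (ret)
			return ret;	/* unwind of partial mappings elided */
		mapped += s->length;
	}

	/*
	 * One driver-side sync for the whole list, e.g. the new
	 * amd_iommu_iotlb_sync_map() added by this patch.
	 */
	if (ops->iotlb_sync_map)
		ops->iotlb_sync_map(domain, iova, mapped);

	return mapped;
}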
Diffstat (limited to 'drivers/iommu')
-rw-r--r--	drivers/iommu/amd/iommu.c	| 15 ++++++++++++---
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c
index 0957be3b6274..cee91cdf0016 100644
--- a/drivers/iommu/amd/iommu.c
+++ b/drivers/iommu/amd/iommu.c
@@ -2028,6 +2028,16 @@ static int amd_iommu_attach_device(struct iommu_domain *dom,
 	return ret;
 }
 
+static void amd_iommu_iotlb_sync_map(struct iommu_domain *dom,
+				     unsigned long iova, size_t size)
+{
+	struct protection_domain *domain = to_pdomain(dom);
+	struct io_pgtable_ops *ops = &domain->iop.iop.ops;
+
+	if (ops->map)
+		domain_flush_np_cache(domain, iova, size);
+}
+
 static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 			 phys_addr_t paddr, size_t page_size, int iommu_prot,
 			 gfp_t gfp)
@@ -2046,10 +2056,8 @@ static int amd_iommu_map(struct iommu_domain *dom, unsigned long iova,
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= IOMMU_PROT_IW;
 
-	if (ops->map) {
+	if (ops->map)
 		ret = ops->map(ops, iova, paddr, page_size, prot, gfp);
-		domain_flush_np_cache(domain, iova, page_size);
-	}
 
 	return ret;
 }
@@ -2229,6 +2237,7 @@ const struct iommu_ops amd_iommu_ops = {
 	.attach_dev = amd_iommu_attach_device,
 	.detach_dev = amd_iommu_detach_device,
 	.map = amd_iommu_map,
+	.iotlb_sync_map = amd_iommu_iotlb_sync_map,
 	.unmap = amd_iommu_unmap,
 	.iova_to_phys = amd_iommu_iova_to_phys,
 	.probe_device = amd_iommu_probe_device,
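
For reference, the flush helper the new callback relies on,
domain_flush_np_cache(), is a no-op unless the IOMMU reports that it
caches non-present translation entries (amd_iommu_np_cache), which is
the common case for virtualized IOMMUs and is why flushing after page
table updates matters on VMs. Roughly, from the AMD driver of the same
era (paraphrased, not a verbatim quote):

static void domain_flush_np_cache(struct protection_domain *domain,
				  dma_addr_t iova, size_t size)
{
	/* Only flush when non-present entries may be cached. */
	if (unlikely(amd_iommu_np_cache)) {
		unsigned long flags;

		spin_lock_irqsave(&domain->lock, flags);
		domain_flush_pages(domain, iova, size);
		domain_flush_complete(domain);
		spin_unlock_irqrestore(&domain->lock, flags);
	}
}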