summaryrefslogtreecommitdiffstats
path: root/drivers/iommu
diff options
context:
space:
mode:
author: Joerg Roedel <jroedel@suse.de> 2015-12-22 12:15:35 +0100
committer: Joerg Roedel <jroedel@suse.de> 2015-12-28 17:18:53 +0100
commit 4eeca8c5e72fad752eba9efc293c924d65faa86e (patch)
tree 77f8a974f3096081750e66d93e1a81898011b001 /drivers/iommu
parent: iommu/amd: Remove need_flush from struct dma_ops_domain (diff)
download: linux-4eeca8c5e72fad752eba9efc293c924d65faa86e.tar.xz
download: linux-4eeca8c5e72fad752eba9efc293c924d65faa86e.zip
iommu/amd: Optimize dma_ops_free_addresses
Don't flush the IOMMU TLB when we free something behind the current next_bit pointer. Update the next_bit pointer instead and let the flush happen on the next wraparound in the allocation path.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
Diffstat (limited to 'drivers/iommu')
-rw-r--r-- drivers/iommu/amd_iommu.c | 5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 39a2048a6cd2..c657e48f0aed 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1633,8 +1633,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
return;
#endif
- if (amd_iommu_unmap_flush ||
- (address + pages > range->next_bit)) {
+ if (amd_iommu_unmap_flush) {
domain_flush_tlb(&dom->domain);
domain_flush_complete(&dom->domain);
}
@@ -1642,6 +1641,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
spin_lock_irqsave(&range->bitmap_lock, flags);
+ if (address + pages > range->next_bit)
+ range->next_bit = address + pages;
bitmap_clear(range->bitmap, address, pages);
spin_unlock_irqrestore(&range->bitmap_lock, flags);