author     Joerg Roedel <jroedel@suse.de>  2015-12-21 18:47:11 +0100
committer  Joerg Roedel <jroedel@suse.de>  2015-12-28 17:18:53 +0100
commit     ab7032bb9c37f9d36ade2267a01a6edf8f2d41d7 (patch)
tree       ded1550fd1d4923e8d39a2b3fe24acad304103df /drivers/iommu/amd_iommu.c
parent     iommu/amd: Iterate over all aperture ranges in dma_ops_area_alloc (diff)
iommu/amd: Remove need_flush from struct dma_ops_domain
The flushing of IOMMU TLBs is now done on a per-range basis, so domain-wide flush tracking is no longer needed. Remove the need_flush member from struct dma_ops_domain.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
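After this patch, the only flush decision left on the free path is local to an aperture range: flush when the freed region reaches the range's allocation cursor (range->next_bit), or when amd_iommu_unmap_flush forces flushing at unmap time. The following is a minimal userspace model of that policy, not kernel code; the names range_model and free_addresses_model and the printf stand-ins for domain_flush_tlb()/domain_flush_complete() are illustrative assumptions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for per-range allocator state (assumed name) */
struct range_model {
	unsigned long next_bit;		/* allocation cursor of one aperture range */
};

static bool unmap_flush_forced;		/* models the amd_iommu_unmap_flush knob */

/* Models the flush decision in dma_ops_free_addresses() after this patch */
static void free_addresses_model(struct range_model *range,
				 unsigned long address, unsigned long pages)
{
	/*
	 * A freed region entirely below the cursor cannot be handed out
	 * again before the range wraps, and the wraparound flushes anyway;
	 * only a free that reaches the cursor, or a forced unmap-time
	 * flush, needs an immediate domain TLB flush.
	 */
	if (unmap_flush_forced || address + pages > range->next_bit)
		printf("flush domain TLB now\n");	/* domain_flush_tlb() + _complete() */
	else
		printf("defer to the wraparound flush\n");
}

int main(void)
{
	struct range_model r = { .next_bit = 64 };

	free_addresses_model(&r, 16, 8);	/* 24 <= 64: deferred  */
	free_addresses_model(&r, 60, 8);	/* 68 >  64: immediate */
	return 0;
}

This is why the domain-wide need_flush flag can go: every case it used to cover is either handled by the per-range wraparound flush or by the immediate flush above.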
Diffstat (limited to 'drivers/iommu/amd_iommu.c')
 -rw-r--r--  drivers/iommu/amd_iommu.c | 30 ++++++------------------------
 1 file changed, 6 insertions(+), 24 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index faf51a066e98..39a2048a6cd2 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -151,9 +151,6 @@ struct dma_ops_domain {
 
 	/* address space relevant data */
 	struct aperture_range *aperture[APERTURE_MAX_RANGES];
-
-	/* This will be set to true when TLB needs to be flushed */
-	bool need_flush;
 };
 
 /****************************************************************************
@@ -1563,7 +1560,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 					unsigned long align_mask,
 					u64 dma_mask)
 {
-	unsigned long next_bit, boundary_size, mask;
+	unsigned long boundary_size, mask;
 	unsigned long address = -1;
 	int start = dom->next_index;
 	int i;
@@ -1581,8 +1578,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 		if (!range || range->offset >= dma_mask)
 			continue;
 
-		next_bit = range->next_bit;
-
 		address = dma_ops_aperture_alloc(dom, range, pages,
 						 dma_mask, boundary_size,
 						 align_mask);
@@ -1591,9 +1586,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 			dom->next_index = i;
 			break;
 		}
-
-		if (next_bit > range->next_bit)
-			dom->need_flush = true;
 	}
 
 	return address;
@@ -1609,7 +1601,6 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 
 #ifdef CONFIG_IOMMU_STRESS
 	dom->next_index = 0;
-	dom->need_flush = true;
 #endif
 
 	address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask);
@@ -1642,7 +1633,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 		return;
 #endif
 
-	if (address + pages > range->next_bit) {
+	if (amd_iommu_unmap_flush ||
+	    (address + pages > range->next_bit)) {
 		domain_flush_tlb(&dom->domain);
 		domain_flush_complete(&dom->domain);
 	}
@@ -1868,8 +1860,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;
 
-	dma_dom->need_flush = false;
-
 	add_domain_to_list(&dma_dom->domain);
 
 	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
@@ -2503,11 +2493,10 @@ retry:
 
 	ADD_STATS_COUNTER(alloced_io_mem, size);
 
-	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		domain_flush_tlb(&dma_dom->domain);
-		dma_dom->need_flush = false;
-	} else if (unlikely(amd_iommu_np_cache))
+	if (unlikely(amd_iommu_np_cache)) {
 		domain_flush_pages(&dma_dom->domain, address, size);
+		domain_flush_complete(&dma_dom->domain);
+	}
 
 out:
 	return address;
@@ -2519,8 +2508,6 @@ out_unmap:
 		dma_ops_domain_unmap(dma_dom, start);
 	}
 
-	domain_flush_pages(&dma_dom->domain, address, size);
-
 	dma_ops_free_addresses(dma_dom, address, pages);
 
 	return DMA_ERROR_CODE;
@@ -2553,11 +2540,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}
 
-	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		domain_flush_pages(&dma_dom->domain, flush_addr, size);
-		dma_dom->need_flush = false;
-	}
-
 	SUB_STATS_COUNTER(alloced_io_mem, size);
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
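On the map side, __map_single() now flushes only when amd_iommu_np_cache is set (the IOMMU caches non-present translations), and the patch pairs that flush with a completion wait; the error path can likewise drop its explicit domain_flush_pages(), since dma_ops_free_addresses() now performs the flush itself when required. A sketch of the remaining map-time policy, under the same modelling assumptions as above (np_cache and the printf calls are stand-ins, not kernel API):

#include <stdbool.h>
#include <stdio.h>

static bool np_cache;	/* models the amd_iommu_np_cache knob (assumed stand-in) */

/* Models the map-time flush decision in __map_single() after this patch */
static void map_pages_model(unsigned long address, unsigned long size)
{
	/*
	 * Only IOMMUs that cache non-present entries need a flush at map
	 * time now, and it is paired with a completion wait so the new
	 * mapping is visible before DMA starts.
	 */
	if (np_cache) {
		printf("flush pages [0x%lx, 0x%lx)\n", address, address + size);
		printf("wait for completion\n");	/* domain_flush_complete() */
	}
}

int main(void)
{
	np_cache = true;
	map_pages_model(0x100000, 0x2000);
	return 0;
}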