author    Yong Wu <yong.wu@mediatek.com>    2019-11-04 08:01:04 +0100
committer Joerg Roedel <jroedel@suse.de>    2019-11-11 15:02:34 +0100
commit    a7a04ea34e1c483d10d3e72250ff5503b1076fe3 (patch)
tree      bcee15eff9401350fa569ced4209d3f5990087a9 /drivers/iommu/mtk_iommu.c
parent    iommu/mediatek: Add a new tlb_lock for tlb_flush (diff)
iommu/mediatek: Use gather to achieve the tlb range flush
Use the iommu_gather mechanism to achieve the tlb range flush.
Gather the iova range in the "tlb_add_page", then flush the merged iova
range in iotlb_sync.

Suggested-by: Tomasz Figa <tfiga@chromium.org>
Signed-off-by: Yong Wu <yong.wu@mediatek.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
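The batching described above can be illustrated with a small standalone model (plain C, compiles on its own; the struct and function names below are invented for the example and are not the kernel API): each unmapped page only widens a pending [start, end) iova range, and a later sync flushes the merged range once.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* Models the gathered state: one pending [start, end) iova range. */
struct gather_model {
	unsigned long start;	/* ULONG_MAX means "nothing gathered yet" */
	unsigned long end;
	size_t pgsize;		/* granule of the gathered pages */
};

static void gather_init(struct gather_model *g)
{
	g->start = ULONG_MAX;
	g->end = 0;
	g->pgsize = 0;
}

/* Called per unmapped page: only record/merge, no hardware flush here. */
static void gather_add_page(struct gather_model *g, unsigned long iova, size_t size)
{
	if (iova < g->start)
		g->start = iova;
	if (iova + size > g->end)
		g->end = iova + size;
	g->pgsize = size;
}

/* Called once at sync time: a single range flush instead of one per page. */
static void gather_sync(struct gather_model *g)
{
	if (g->start == ULONG_MAX)	/* nothing gathered, nothing to flush */
		return;
	printf("flush range [0x%lx, 0x%lx), length 0x%lx, granule 0x%zx\n",
	       g->start, g->end, g->end - g->start, g->pgsize);
	gather_init(g);
}

int main(void)
{
	struct gather_model g;

	gather_init(&g);
	/* Unmapping three contiguous 4KiB pages ends up as one 12KiB flush. */
	gather_add_page(&g, 0x1000, 0x1000);
	gather_add_page(&g, 0x2000, 0x1000);
	gather_add_page(&g, 0x3000, 0x1000);
	gather_sync(&g);
	return 0;
}

This mirrors the structure of the patch: mtk_iommu_tlb_flush_page_nosync() now only records pages via iommu_iotlb_gather_add_page(), and mtk_iommu_iotlb_sync() issues the single merged range flush under tlb_lock.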
Diffstat (limited to 'drivers/iommu/mtk_iommu.c')
-rw-r--r--  drivers/iommu/mtk_iommu.c  12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c
index c2f6c78fee44..81ac95fe6f3b 100644
--- a/drivers/iommu/mtk_iommu.c
+++ b/drivers/iommu/mtk_iommu.c
@@ -245,11 +245,9 @@ static void mtk_iommu_tlb_flush_page_nosync(struct iommu_iotlb_gather *gather,
 					    void *cookie)
 {
 	struct mtk_iommu_data *data = cookie;
-	unsigned long flags;
+	struct iommu_domain *domain = &data->m4u_dom->domain;
 
-	spin_lock_irqsave(&data->tlb_lock, flags);
-	mtk_iommu_tlb_add_flush_nosync(iova, granule, granule, true, cookie);
-	spin_unlock_irqrestore(&data->tlb_lock, flags);
+	iommu_iotlb_gather_add_page(domain, gather, iova, granule);
 }
 
 static const struct iommu_flush_ops mtk_iommu_flush_ops = {
@@ -469,9 +467,15 @@ static void mtk_iommu_iotlb_sync(struct iommu_domain *domain,
 				 struct iommu_iotlb_gather *gather)
 {
 	struct mtk_iommu_data *data = mtk_iommu_get_m4u_data();
+	size_t length = gather->end - gather->start;
 	unsigned long flags;
 
+	if (gather->start == ULONG_MAX)
+		return;
+
 	spin_lock_irqsave(&data->tlb_lock, flags);
+	mtk_iommu_tlb_add_flush_nosync(gather->start, length, gather->pgsize,
+				       false, data);
 	mtk_iommu_tlb_sync(data);
 	spin_unlock_irqrestore(&data->tlb_lock, flags);
 }
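A note on the two new checks in mtk_iommu_iotlb_sync(): gather->start is initialised to ULONG_MAX by the core, so the early return skips syncs where nothing was gathered, and gather->end - gather->start is the merged length. The core helper used in the first hunk, iommu_iotlb_gather_add_page(), only merges pages that are contiguous (or overlapping) and share the same granule; at the time of this series it otherwise synced the pending range first, so the gathered range never over-approximates. The standalone sketch below models that disjoint-page behaviour (again an illustration with invented names, not the kernel source):

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

struct gather_model {
	unsigned long start, end;
	size_t pgsize;
};

static void model_flush(struct gather_model *g)
{
	if (g->start == ULONG_MAX)
		return;
	printf("flush [0x%lx, 0x%lx)\n", g->start, g->end);
	g->start = ULONG_MAX;
	g->end = 0;
	g->pgsize = 0;
}

/*
 * Merge policy model: a page disjoint from the pending range, or mapped with
 * a different granule, forces a flush before being recorded.
 */
static void model_add_page(struct gather_model *g, unsigned long iova, size_t size)
{
	unsigned long end = iova + size;

	if (g->pgsize != size || end < g->start || iova > g->end) {
		if (g->pgsize)
			model_flush(g);
		g->pgsize = size;
	}
	if (iova < g->start)
		g->start = iova;
	if (end > g->end)
		g->end = end;
}

int main(void)
{
	struct gather_model g = { .start = ULONG_MAX, .end = 0, .pgsize = 0 };

	model_add_page(&g, 0x1000, 0x1000);	/* starts a new range */
	model_add_page(&g, 0x2000, 0x1000);	/* contiguous: merged */
	model_add_page(&g, 0x8000, 0x1000);	/* disjoint: flushes [0x1000, 0x3000) first */
	model_flush(&g);			/* final sync flushes [0x8000, 0x9000) */
	return 0;
}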