author    Robin Murphy <robin.murphy@arm.com>  2019-09-18 18:17:48 +0200
committer Will Deacon <will@kernel.org>        2019-10-01 13:17:39 +0200
commit    3f3b8d0c9c1838271543df9e655032117a663f88
tree      fdc18e25e2fed5999670146deace661f5493c001 /drivers/iommu/arm-smmu.c
parent    Linux 5.4-rc1
iommu/arm-smmu: Remove .tlb_inv_range indirection
Fill in 'native' iommu_flush_ops callbacks for all the
arm_smmu_flush_ops variants, and clear up the remains of the previous
.tlb_inv_range abstraction.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
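
In effect this is a devirtualization of the flush path. Before the patch, the shared arm_smmu_tlb_inv_walk/_leaf/_add_page helpers had to fetch smmu_domain->flush_ops and make an indirect call through ->tlb_inv_range on every invalidation; after it, each stage owns callbacks that call arm_smmu_tlb_inv_range_s1/_s2 directly. The standalone sketch below shows only that before/after shape; the types and names in it (old_flush_ops, old_domain, inv_range_s1, sync_context) are simplified stand-ins invented for illustration, not the driver's code.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Names and types here are illustrative stand-ins, not the driver's own. */

    /* Before: the generic helper chases a per-domain function pointer. */
    struct old_flush_ops {
            void (*tlb_inv_range)(unsigned long iova, size_t size,
                                  size_t granule, bool leaf, void *cookie);
            void (*tlb_sync)(void *cookie);
    };

    struct old_domain {
            const struct old_flush_ops *flush_ops;
    };

    static void old_inv_range(unsigned long iova, size_t size, size_t granule,
                              bool leaf, void *cookie)
    {
            printf("old range inv: iova=%#lx size=%zu leaf=%d\n", iova, size, leaf);
    }

    static void sync_context(void *cookie)
    {
            printf("sync\n");
    }

    static void old_tlb_inv_walk(unsigned long iova, size_t size,
                                 size_t granule, void *cookie)
    {
            struct old_domain *d = cookie;

            /* One extra indirect call on every flush. */
            d->flush_ops->tlb_inv_range(iova, size, granule, false, cookie);
            d->flush_ops->tlb_sync(cookie);
    }

    /* After: a 'native' stage-1 callback calls its helper directly. */
    static void inv_range_s1(unsigned long iova, size_t size, size_t granule,
                             void *cookie, bool leaf)
    {
            printf("s1 range inv: iova=%#lx size=%zu leaf=%d\n", iova, size, leaf);
    }

    static void new_tlb_inv_walk_s1(unsigned long iova, size_t size,
                                    size_t granule, void *cookie)
    {
            inv_range_s1(iova, size, granule, cookie, false); /* direct call */
            sync_context(cookie);
    }

    int main(void)
    {
            const struct old_flush_ops ops = {
                    .tlb_inv_range = old_inv_range,
                    .tlb_sync      = sync_context,
            };
            struct old_domain dom = { .flush_ops = &ops };

            old_tlb_inv_walk(0x1000, 0x2000, 0x1000, &dom);  /* indirect */
            new_tlb_inv_walk_s1(0x1000, 0x2000, 0x1000, NULL); /* direct */
            return 0;
    }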
Diffstat (limited to 'drivers/iommu/arm-smmu.c')
-rw-r--r--   drivers/iommu/arm-smmu.c | 106
1 file changed, 61 insertions(+), 45 deletions(-)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b18aac4c105e..78292e8e31a5 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -312,7 +312,7 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 }
 
 static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
-                                      size_t granule, bool leaf, void *cookie)
+                                      size_t granule, void *cookie, bool leaf)
 {
         struct arm_smmu_domain *smmu_domain = cookie;
         struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -342,7 +342,7 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
 }
 
 static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
-                                      size_t granule, bool leaf, void *cookie)
+                                      size_t granule, void *cookie, bool leaf)
 {
         struct arm_smmu_domain *smmu_domain = cookie;
         struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -362,84 +362,100 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
         } while (size -= granule);
 }
 
-/*
- * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
- * almost negligible, but the benefit of getting the first one in as far ahead
- * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
- */
-static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
-                                         size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
 {
-        struct arm_smmu_domain *smmu_domain = cookie;
-        struct arm_smmu_device *smmu = smmu_domain->smmu;
-
-        if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
-                wmb();
+        arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, false);
+        arm_smmu_tlb_sync_context(cookie);
+}
 
-        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
+{
+        arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, true);
+        arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
-                                  size_t granule, void *cookie)
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+                                     unsigned long iova, size_t granule,
+                                     void *cookie)
 {
-        struct arm_smmu_domain *smmu_domain = cookie;
-        const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+        arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, true);
+}
 
-        ops->tlb_inv_range(iova, size, granule, false, cookie);
-        ops->tlb_sync(cookie);
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
+{
+        arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, false);
+        arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
-                                  size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
+                                     size_t granule, void *cookie)
 {
-        struct arm_smmu_domain *smmu_domain = cookie;
-        const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+        arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, true);
+        arm_smmu_tlb_sync_context(cookie);
+}
 
-        ops->tlb_inv_range(iova, size, granule, true, cookie);
-        ops->tlb_sync(cookie);
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+                                     unsigned long iova, size_t granule,
+                                     void *cookie)
+{
+        arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, true);
 }
 
-static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
-                                  unsigned long iova, size_t granule,
-                                  void *cookie)
+static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
+                                       size_t granule, void *cookie)
+{
+        arm_smmu_tlb_inv_context_s2(cookie);
+}
+
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
+ */
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+                                        unsigned long iova, size_t granule,
+                                        void *cookie)
 {
         struct arm_smmu_domain *smmu_domain = cookie;
-        const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+        struct arm_smmu_device *smmu = smmu_domain->smmu;
 
-        ops->tlb_inv_range(iova, granule, granule, true, cookie);
+        if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+                wmb();
+
+        arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
 }
 
 static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
         .tlb = {
                 .tlb_flush_all  = arm_smmu_tlb_inv_context_s1,
-                .tlb_flush_walk = arm_smmu_tlb_inv_walk,
-                .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
-                .tlb_add_page   = arm_smmu_tlb_add_page,
+                .tlb_flush_walk = arm_smmu_tlb_inv_walk_s1,
+                .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s1,
+                .tlb_add_page   = arm_smmu_tlb_add_page_s1,
         },
-        .tlb_inv_range  = arm_smmu_tlb_inv_range_s1,
         .tlb_sync       = arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
         .tlb = {
                 .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
-                .tlb_flush_walk = arm_smmu_tlb_inv_walk,
-                .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
-                .tlb_add_page   = arm_smmu_tlb_add_page,
+                .tlb_flush_walk = arm_smmu_tlb_inv_walk_s2,
+                .tlb_flush_leaf = arm_smmu_tlb_inv_leaf_s2,
+                .tlb_add_page   = arm_smmu_tlb_add_page_s2,
         },
-        .tlb_inv_range  = arm_smmu_tlb_inv_range_s2,
         .tlb_sync       = arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
         .tlb = {
                 .tlb_flush_all  = arm_smmu_tlb_inv_context_s2,
-                .tlb_flush_walk = arm_smmu_tlb_inv_walk,
-                .tlb_flush_leaf = arm_smmu_tlb_inv_leaf,
-                .tlb_add_page   = arm_smmu_tlb_add_page,
+                .tlb_flush_walk = arm_smmu_tlb_inv_any_s2_v1,
+                .tlb_flush_leaf = arm_smmu_tlb_inv_any_s2_v1,
+                .tlb_add_page   = arm_smmu_tlb_add_page_s2_v1,
         },
-        .tlb_inv_range  = arm_smmu_tlb_inv_vmid_nosync,
         .tlb_sync       = arm_smmu_tlb_sync_vmid,
 };
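
Regarding the MMU-401 comment carried into arm_smmu_tlb_add_page_s2_v1: SMMUv1 has no per-page stage-2 invalidation, only invalidate-by-VMID, so .tlb_add_page posts a TLBIVMID for each page as it is unmapped and the single sync at the end waits for completion. Firing the first invalidation early lets it proceed in parallel with the rest of the unmap work. The model below is a rough, hypothetical sketch of that sequencing only; post_tlbivmid and wait_for_tlb_sync are invented names standing in for the driver's register write and global sync.

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-ins; the real driver writes the GR0 TLBIVMID
     * register and then performs a global TLB sync. */
    static void post_tlbivmid(int vmid)
    {
            /* Posted write: invalidation starts now, completion is checked later. */
            printf("TLBIVMID vmid=%d posted\n", vmid);
    }

    static void wait_for_tlb_sync(void)
    {
            /* Stand-in for the global sync that waits for completion. */
            printf("global sync: wait for outstanding invalidations\n");
    }

    /*
     * Unmap flow, roughly: .tlb_add_page runs once per freed page and fires
     * the (cheap) TLBIVMID right away, so the first invalidation is already
     * in flight long before the single sync at the end.
     */
    static void unmap_pages(int vmid, size_t npages)
    {
            for (size_t i = 0; i < npages; i++)
                    post_tlbivmid(vmid);
            wait_for_tlb_sync();
    }

    int main(void)
    {
            unmap_pages(1, 3);
            return 0;
    }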