author:    Jean-Philippe Brucker <jean-philippe@linaro.org>  2021-01-22 16:10:54 +0100
committer: Will Deacon <will@kernel.org>                     2021-01-22 16:44:32 +0100
commit:    eba8d2f8f80315094b61eaf2bc6cd481741d6d93
tree:      9a4054b7854b84a6adc0bc909f842e251ec2e032 /drivers/iommu
parent:    iommu/arm-smmu-v3: Use DEFINE_RES_MEM() to simplify code
iommu/arm-smmu-v3: Split arm_smmu_tlb_inv_range()
Extract some of the cmd initialization and the ATC invalidation from
arm_smmu_tlb_inv_range(), to allow an MMU notifier to invalidate a VA
range by ASID.
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20210122151054.2833521-2-jean-philippe@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
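The intended user of this split is the SVA/MMU-notifier path, which already knows the ASID it wants to invalidate and therefore cannot go through the stage-based command setup. A follow-up helper built on __arm_smmu_tlb_inv_range() could look roughly like the sketch below; the name arm_smmu_tlb_inv_range_asid() and its exact signature are illustrative assumptions here, not something introduced by this patch.

/*
 * Illustrative sketch only: invalidate a VA range for a caller-supplied
 * ASID by reusing __arm_smmu_tlb_inv_range() as factored out in this
 * patch. Helper name and signature are assumptions, not part of the commit.
 */
static void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size,
					int asid, size_t granule, bool leaf,
					struct arm_smmu_domain *smmu_domain)
{
	struct arm_smmu_cmdq_ent cmd = {
		.opcode	= CMDQ_OP_TLBI_NH_VA,
		.tlbi = {
			.asid	= asid,
			.leaf	= leaf,
		},
	};

	/*
	 * The command is fully described by the caller-supplied ASID, so
	 * the shared range-walking code only has to compute addresses and
	 * range encodings; it never looks at the domain's S1/S2 config.
	 */
	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
}

Keeping command construction in the caller is what makes the range-by-ASID case possible without duplicating the range-walking logic.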
Diffstat (limited to 'drivers/iommu')
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 63
 1 file changed, 36 insertions(+), 27 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index f04c55a7503c..86cbac77c941 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1658,40 +1658,28 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 	arm_smmu_atc_inv_domain(smmu_domain, 0, 0, 0);
 }
 
-static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
-				   size_t granule, bool leaf,
-				   struct arm_smmu_domain *smmu_domain)
+static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
+				     unsigned long iova, size_t size,
+				     size_t granule,
+				     struct arm_smmu_domain *smmu_domain)
 {
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
-	unsigned long start = iova, end = iova + size, num_pages = 0, tg = 0;
+	unsigned long end = iova + size, num_pages = 0, tg = 0;
 	size_t inv_range = granule;
 	struct arm_smmu_cmdq_batch cmds = {};
-	struct arm_smmu_cmdq_ent cmd = {
-		.tlbi = {
-			.leaf = leaf,
-		},
-	};
 
 	if (!size)
 		return;
 
-	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
-		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
-		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
-	} else {
-		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
-		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
-	}
-
 	if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
 		/* Get the leaf page size */
 		tg = __ffs(smmu_domain->domain.pgsize_bitmap);
 
 		/* Convert page size of 12,14,16 (log2) to 1,2,3 */
-		cmd.tlbi.tg = (tg - 10) / 2;
+		cmd->tlbi.tg = (tg - 10) / 2;
 
 		/* Determine what level the granule is at */
-		cmd.tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+		cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
 
 		num_pages = size >> tg;
 	}
@@ -1709,11 +1697,11 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 
 			/* Determine the power of 2 multiple number of pages */
 			scale = __ffs(num_pages);
-			cmd.tlbi.scale = scale;
+			cmd->tlbi.scale = scale;
 
 			/* Determine how many chunks of 2^scale size we have */
 			num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
-			cmd.tlbi.num = num - 1;
+			cmd->tlbi.num = num - 1;
 
 			/* range is num * 2^scale * pgsize */
 			inv_range = num << (scale + tg);
@@ -1722,17 +1710,37 @@ static void arm_smmu_tlb_inv_range(unsigned long iova, size_t size,
 			num_pages -= num << scale;
 		}
 
-		cmd.tlbi.addr = iova;
-		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
+		cmd->tlbi.addr = iova;
+		arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
 		iova += inv_range;
 	}
 	arm_smmu_cmdq_batch_submit(smmu, &cmds);
+}
+
+static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
+					  size_t granule, bool leaf,
+					  struct arm_smmu_domain *smmu_domain)
+{
+	struct arm_smmu_cmdq_ent cmd = {
+		.tlbi = {
+			.leaf = leaf,
+		},
+	};
+
+	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
+		cmd.opcode	= CMDQ_OP_TLBI_NH_VA;
+		cmd.tlbi.asid	= smmu_domain->s1_cfg.cd.asid;
+	} else {
+		cmd.opcode	= CMDQ_OP_TLBI_S2_IPA;
+		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
+	}
+	__arm_smmu_tlb_inv_range(&cmd, iova, size, granule, smmu_domain);
 
 	/*
 	 * Unfortunately, this can't be leaf-only since we may have
 	 * zapped an entire table.
 	 */
-	arm_smmu_atc_inv_domain(smmu_domain, 0, start, size);
+	arm_smmu_atc_inv_domain(smmu_domain, 0, iova, size);
 }
 
 static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
@@ -1748,7 +1756,7 @@ static void arm_smmu_tlb_inv_page_nosync(struct iommu_iotlb_gather *gather,
 static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
 				  size_t granule, void *cookie)
 {
-	arm_smmu_tlb_inv_range(iova, size, granule, false, cookie);
+	arm_smmu_tlb_inv_range_domain(iova, size, granule, false, cookie);
 }
 
 static const struct iommu_flush_ops arm_smmu_flush_ops = {
@@ -2271,8 +2279,9 @@ static void arm_smmu_iotlb_sync(struct iommu_domain *domain,
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 
-	arm_smmu_tlb_inv_range(gather->start, gather->end - gather->start,
-			       gather->pgsize, true, smmu_domain);
+	arm_smmu_tlb_inv_range_domain(gather->start,
+				      gather->end - gather->start,
+				      gather->pgsize, true, smmu_domain);
 }
 
 static phys_addr_t
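For reference, the TG/TTL arithmetic commented in __arm_smmu_tlb_inv_range() above can be checked in isolation. A minimal user-space sketch, assuming a 4KB leaf page size and a 2MB invalidation granule (example values, not taken from the patch):

#include <stdio.h>

/*
 * Standalone illustration (not kernel code) of the TG/TTL arithmetic used
 * in __arm_smmu_tlb_inv_range(). Example inputs: 4KB leaf page size and a
 * 2MB invalidation granule.
 */
int main(void)
{
	unsigned long tg = 12;			/* log2(4KB): lowest set bit of the page-size bitmap */
	unsigned long granule_log2 = 21;	/* log2(2MB): invalidation granule */

	unsigned long tlbi_tg  = (tg - 10) / 2;				/* (12 - 10) / 2 = 1, the 4KB TG encoding */
	unsigned long tlbi_ttl = 4 - ((granule_log2 - 3) / (tg - 3));	/* 4 - 18 / 9 = 2 */

	printf("tg=%lu ttl=%lu\n", tlbi_tg, tlbi_ttl);	/* prints "tg=1 ttl=2" */
	return 0;
}

With a 4KB translation granule, a 2MB range lines up with a level-2 block, which is why the TTL computation yields 2 in this example.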