author		Robin Murphy <robin.murphy@arm.com>	2015-12-07 19:18:52 +0100
committer	Will Deacon <will.deacon@arm.com>	2015-12-17 13:05:35 +0100
commit		75df1386557c25188bd2383bbe8dd14a5ac81c06 (patch)
tree		95850b0e6af3a2e4c5e15333922aba99f254542e
parent		iommu/io-pgtable: Indicate granule for TLB maintenance (diff)
download	linux-75df1386557c25188bd2383bbe8dd14a5ac81c06.tar.xz
		linux-75df1386557c25188bd2383bbe8dd14a5ac81c06.zip
iommu/arm-smmu: Invalidate TLBs properly
When invalidating an IOVA range potentially spanning multiple pages,
such as when removing an entire intermediate-level table, we currently
only issue an invalidation for the first IOVA of that range. Since the
architecture specifies that address-based TLB maintenance operations
target a single entry, an SMMU could feasibly retain live entries for
subsequent pages within that unmapped range, which is not good.

Make sure we hit every possible entry by iterating over the whole range
at the granularity provided by the pagetable implementation.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[will: added missing semicolons...]
Signed-off-by: Will Deacon <will.deacon@arm.com>
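The sketch below is a minimal, user-space illustration of the per-granule
loop the patch introduces; it is not part of the kernel change. The helpers
invalidate_entry() and invalidate_range() are hypothetical stand-ins for the
driver's command issue (arm_smmu_cmdq_issue_cmd() / writel_relaxed()) and
for the tlb_add_flush callback, assuming, as the io-pgtable code arranges,
that size is a non-zero multiple of granule.

/* Hypothetical sketch of the per-granule invalidation loop. */
#include <stdio.h>
#include <stddef.h>

static void invalidate_entry(unsigned long iova)
{
	/* In the driver this would be one TLBI command or register write. */
	printf("invalidate IOVA 0x%lx\n", iova);
}

static void invalidate_range(unsigned long iova, size_t size, size_t granule)
{
	/*
	 * Issue one invalidation per granule-sized page, mirroring the
	 * do { ... } while (size -= granule) loops added by the patch.
	 * Assumes size is a non-zero multiple of granule.
	 */
	do {
		invalidate_entry(iova);
		iova += granule;
	} while (size -= granule);
}

int main(void)
{
	/* Unmapping 16KB at IOVA 0x10000 with 4KB pages: four invalidations. */
	invalidate_range(0x10000, 0x4000, 0x1000);
	return 0;
}

A do/while (rather than a plain while) matches the patch: the callback is
never invoked with size == 0, so at least one invalidation is always issued.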
-rw-r--r--	drivers/iommu/arm-smmu-v3.c	5
-rw-r--r--	drivers/iommu/arm-smmu.c	16
2 files changed, 17 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 735ad2c58dd8..4991e79465ee 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -1360,7 +1360,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
}
- arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ do {
+ arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+ cmd.tlbi.addr += granule;
+ } while (size -= granule);
}
static struct iommu_gather_ops arm_smmu_gather_ops = {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 7e04bf5640ae..59ee4b8a3236 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -597,12 +597,18 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) {
iova &= ~12UL;
iova |= ARM_SMMU_CB_ASID(cfg);
- writel_relaxed(iova, reg);
+ do {
+ writel_relaxed(iova, reg);
+ iova += granule;
+ } while (size -= granule);
#ifdef CONFIG_64BIT
} else {
iova >>= 12;
iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48;
- writeq_relaxed(iova, reg);
+ do {
+ writeq_relaxed(iova, reg);
+ iova += granule >> 12;
+ } while (size -= granule);
#endif
}
#ifdef CONFIG_64BIT
@@ -610,7 +616,11 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
ARM_SMMU_CB_S2_TLBIIPAS2;
- writeq_relaxed(iova >> 12, reg);
+ iova >>= 12;
+ do {
+ writeq_relaxed(iova, reg);
+ iova += granule >> 12;
+ } while (size -= granule);
#endif
} else {
reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID;