author     Nicolin Chen <nicolinc@nvidia.com>    2024-08-30 00:34:39 +0200
committer  Will Deacon <will@kernel.org>         2024-08-30 16:28:25 +0200
commit     a9d40285bdefef700ebc7551ef79d2f3e4559e73 (patch)
tree       89d7a6d8e599af70aad9ed2a45ca03d8d389529f /drivers/iommu/arm
parent     iommu/arm-smmu-v3: Start a new batch if new command is not supported (diff)
iommu/tegra241-cmdqv: Limit CMDs for VCMDQs of a guest owned VINTF
When VCMDQs are assigned to a VINTF owned by a guest (HYP_OWN bit unset),
the VCMDQ HW supports only TLB and ATC invalidation commands. So, implement
the new cmdq->supports_cmd op to check the input cmd and make sure it is
supported by the selected queue.
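As a rough illustration of the dispatch this enables, the sketch below is a
minimal, self-contained user-space model, not the kernel code itself: the
struct layouts, opcode names and get_cmdq() helper are simplified stand-ins
for the driver's arm_smmu_cmdq, arm_smmu_cmdq_ent and arm_smmu_get_cmdq(). A
secondary queue advertises a supports_cmd hook, and any command it cannot
take falls back to the main SMMU command queue.

#include <stdbool.h>
#include <stdio.h>

enum cmd_opcode { OP_CFGI_STE, OP_TLBI_NH_VA, OP_ATC_INV };

struct cmdq_ent {
	enum cmd_opcode opcode;
};

struct cmdq {
	const char *name;
	/* NULL means the queue accepts every command (the main SMMU CMDQ) */
	bool (*supports_cmd)(struct cmdq_ent *ent);
};

/* Guest-owned VCMDQ HW only accepts TLB and ATC invalidations */
static bool guest_vcmdq_supports_cmd(struct cmdq_ent *ent)
{
	switch (ent->opcode) {
	case OP_TLBI_NH_VA:
	case OP_ATC_INV:
		return true;
	default:
		return false;
	}
}

static struct cmdq main_cmdq = { "smmu->cmdq", NULL };
static struct cmdq guest_vcmdq = { "vcmdq", guest_vcmdq_supports_cmd };

/* Per-command queue selection: prefer the secondary queue when it can
 * take this command, otherwise fall back to the main command queue. */
static struct cmdq *get_cmdq(struct cmdq_ent *ent)
{
	if (!guest_vcmdq.supports_cmd || guest_vcmdq.supports_cmd(ent))
		return &guest_vcmdq;
	return &main_cmdq;
}

int main(void)
{
	struct cmdq_ent tlbi = { OP_TLBI_NH_VA };
	struct cmdq_ent cfgi = { OP_CFGI_STE };

	printf("TLBI_NH_VA -> %s\n", get_cmdq(&tlbi)->name); /* vcmdq */
	printf("CFGI_STE   -> %s\n", get_cmdq(&cfgi)->name); /* smmu->cmdq */
	return 0;
}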
Note that the HYP_OWN bit should never be set for a guest VM, regardless of
whether the guest kernel driver writes it, i.e. the hypervisor running in the
host OS should wire this bit to zero when trapping a guest kernel's write
access to the VINTF_CONFIG register.
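For example, a VMM that traps the guest's MMIO write to its virtual
VINTF_CONFIG register might sanitize the value along these lines. This is a
hypothetical sketch, not part of this patch or any real hypervisor; the bit
position and helper name are assumptions for illustration only.

/* Hypothetical VMM-side sketch: wire HYP_OWN to zero on a trapped guest
 * write. The bit position below is assumed for illustration only. */
#include <assert.h>
#include <stdint.h>

#define VINTF_HYP_OWN	(1u << 17)	/* assumed bit position */

static uint32_t sanitize_guest_vintf_config(uint32_t guest_val)
{
	/* The guest may program everything except HYP_OWN */
	return guest_val & ~VINTF_HYP_OWN;
}

int main(void)
{
	/* Even if the guest driver writes HYP_OWN=1, the HW never sees it */
	assert(!(sanitize_guest_vintf_config(0xffffffffu) & VINTF_HYP_OWN));
	return 0;
}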
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
Link: https://lore.kernel.org/r/8160292337059b91271045800e5c62f7295e2c24.1724970714.git.nicolinc@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
Diffstat (limited to 'drivers/iommu/arm')
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c    | 28
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h    |  3
-rw-r--r--  drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c | 34
3 files changed, 51 insertions, 14 deletions
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 1e67fd4fbcf3..0c28e2b5b723 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -346,12 +346,13 @@ static int arm_smmu_cmdq_build_cmd(u64 *cmd, struct arm_smmu_cmdq_ent *ent)
 	return 0;
 }
 
-static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu)
+static struct arm_smmu_cmdq *arm_smmu_get_cmdq(struct arm_smmu_device *smmu,
+					       struct arm_smmu_cmdq_ent *ent)
 {
 	struct arm_smmu_cmdq *cmdq = NULL;
 
 	if (smmu->impl_ops && smmu->impl_ops->get_secondary_cmdq)
-		cmdq = smmu->impl_ops->get_secondary_cmdq(smmu);
+		cmdq = smmu->impl_ops->get_secondary_cmdq(smmu, ent);
 
 	return cmdq ?: &smmu->cmdq;
 }
@@ -897,7 +898,7 @@ static int __arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
 	}
 
 	return arm_smmu_cmdq_issue_cmdlist(
-		smmu, arm_smmu_get_cmdq(smmu), cmd, 1, sync);
+		smmu, arm_smmu_get_cmdq(smmu, ent), cmd, 1, sync);
 }
 
 static int arm_smmu_cmdq_issue_cmd(struct arm_smmu_device *smmu,
@@ -913,10 +914,11 @@ static int arm_smmu_cmdq_issue_cmd_with_sync(struct arm_smmu_device *smmu,
 }
 
 static void arm_smmu_cmdq_batch_init(struct arm_smmu_device *smmu,
-				     struct arm_smmu_cmdq_batch *cmds)
+				     struct arm_smmu_cmdq_batch *cmds,
+				     struct arm_smmu_cmdq_ent *ent)
 {
 	cmds->num = 0;
-	cmds->cmdq = arm_smmu_get_cmdq(smmu);
+	cmds->cmdq = arm_smmu_get_cmdq(smmu, ent);
 }
 
 static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
@@ -931,13 +933,13 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu,
 	if (force_sync || unsupported_cmd) {
 		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
 					    cmds->num, true);
-		arm_smmu_cmdq_batch_init(smmu, cmds);
+		arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
 	}
 
 	if (cmds->num == CMDQ_BATCH_ENTRIES) {
 		arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmdq, cmds->cmds,
 					    cmds->num, false);
-		arm_smmu_cmdq_batch_init(smmu, cmds);
+		arm_smmu_cmdq_batch_init(smmu, cmds, cmd);
 	}
 
 	index = cmds->num * CMDQ_ENT_DWORDS;
@@ -1205,7 +1207,7 @@ static void arm_smmu_sync_cd(struct arm_smmu_master *master,
 		},
 	};
 
-	arm_smmu_cmdq_batch_init(smmu, &cmds);
+	arm_smmu_cmdq_batch_init(smmu, &cmds, &cmd);
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.cfgi.sid = master->streams[i].id;
 		arm_smmu_cmdq_batch_add(smmu, &cmds, &cmd);
@@ -2056,7 +2058,7 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
 
 	arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
 
-	arm_smmu_cmdq_batch_init(master->smmu, &cmds);
+	arm_smmu_cmdq_batch_init(master->smmu, &cmds, &cmd);
 	for (i = 0; i < master->num_streams; i++) {
 		cmd.atc.sid = master->streams[i].id;
 		arm_smmu_cmdq_batch_add(master->smmu, &cmds, &cmd);
@@ -2071,7 +2073,9 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 	struct arm_smmu_master_domain *master_domain;
 	int i;
 	unsigned long flags;
-	struct arm_smmu_cmdq_ent cmd;
+	struct arm_smmu_cmdq_ent cmd = {
+		.opcode = CMDQ_OP_ATC_INV,
+	};
 	struct arm_smmu_cmdq_batch cmds;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
@@ -2094,7 +2098,7 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 	if (!atomic_read(&smmu_domain->nr_ats_masters))
 		return 0;
 
-	arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds);
+	arm_smmu_cmdq_batch_init(smmu_domain->smmu, &cmds, &cmd);
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
 	list_for_each_entry(master_domain, &smmu_domain->devices,
@@ -2176,7 +2180,7 @@ static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
 		num_pages++;
 	}
 
-	arm_smmu_cmdq_batch_init(smmu, &cmds);
+	arm_smmu_cmdq_batch_init(smmu, &cmds, cmd);
 
 	while (iova < end) {
 		if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 4deb40cfe2e1..4d5af5ac8a63 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -642,7 +642,8 @@ struct arm_smmu_strtab_cfg {
 struct arm_smmu_impl_ops {
 	int (*device_reset)(struct arm_smmu_device *smmu);
 	void (*device_remove)(struct arm_smmu_device *smmu);
-	struct arm_smmu_cmdq *(*get_secondary_cmdq)(struct arm_smmu_device *smmu);
+	struct arm_smmu_cmdq *(*get_secondary_cmdq)(
+		struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent);
 };
 
 /* An SMMUv3 instance */
diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
index 5ac3032ee6dd..9eb9d959f3e5 100644
--- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
+++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c
@@ -142,6 +142,7 @@ struct tegra241_vcmdq {
  * struct tegra241_vintf - Virtual Interface
  * @idx: Global index in the CMDQV
  * @enabled: Enable status
+ * @hyp_own: Owned by hypervisor (in-kernel)
  * @cmdqv: Parent CMDQV pointer
  * @lvcmdqs: List of logical VCMDQ pointers
  * @base: MMIO base address
@@ -150,6 +151,7 @@ struct tegra241_vintf {
 	u16 idx;
 
 	bool enabled;
+	bool hyp_own;
 
 	struct tegra241_cmdqv *cmdqv;
 	struct tegra241_vcmdq **lvcmdqs;
@@ -301,8 +303,21 @@ static irqreturn_t tegra241_cmdqv_isr(int irq, void *devid)
 
 /* Command Queue Function */
 
+static bool tegra241_guest_vcmdq_supports_cmd(struct arm_smmu_cmdq_ent *ent)
+{
+	switch (ent->opcode) {
+	case CMDQ_OP_TLBI_NH_ASID:
+	case CMDQ_OP_TLBI_NH_VA:
+	case CMDQ_OP_ATC_INV:
+		return true;
+	default:
+		return false;
+	}
+}
+
 static struct arm_smmu_cmdq *
-tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu)
+tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu,
+			struct arm_smmu_cmdq_ent *ent)
 {
 	struct tegra241_cmdqv *cmdqv =
 		container_of(smmu, struct tegra241_cmdqv, smmu);
@@ -328,6 +343,10 @@ tegra241_cmdqv_get_cmdq(struct arm_smmu_device *smmu)
 	vcmdq = vintf->lvcmdqs[lidx];
 	if (!vcmdq || !READ_ONCE(vcmdq->enabled))
 		return NULL;
+
+	/* Unsupported CMD goes for smmu->cmdq pathway */
+	if (!arm_smmu_cmdq_supports_cmd(&vcmdq->cmdq, ent))
+		return NULL;
 
 	return &vcmdq->cmdq;
 }
@@ -406,12 +425,22 @@ static int tegra241_vintf_hw_init(struct tegra241_vintf *vintf, bool hyp_own)
 	tegra241_vintf_hw_deinit(vintf);
 
 	/* Configure and enable VINTF */
+	/*
+	 * Note that HYP_OWN bit is wired to zero when running in guest kernel,
+	 * whether enabling it here or not, as !HYP_OWN cmdq HWs only support a
+	 * restricted set of supported commands.
+	 */
 	regval = FIELD_PREP(VINTF_HYP_OWN, hyp_own);
 	writel(regval, REG_VINTF(vintf, CONFIG));
 
 	ret = vintf_write_config(vintf, regval | VINTF_EN);
 	if (ret)
 		return ret;
+	/*
+	 * As being mentioned above, HYP_OWN bit is wired to zero for a guest
+	 * kernel, so read it back from HW to ensure that reflects in hyp_own
+	 */
+	vintf->hyp_own = !!(VINTF_HYP_OWN & readl(REG_VINTF(vintf, CONFIG)));
 
 	for (lidx = 0; lidx < vintf->cmdqv->num_lvcmdqs_per_vintf; lidx++) {
 		if (vintf->lvcmdqs && vintf->lvcmdqs[lidx]) {
@@ -493,6 +522,9 @@ static int tegra241_vcmdq_alloc_smmu_cmdq(struct tegra241_vcmdq *vcmdq)
 
 	q->q_base = q->base_dma & VCMDQ_ADDR;
 	q->q_base |= FIELD_PREP(VCMDQ_LOG2SIZE, q->llq.max_n_shift);
 
+	if (!vcmdq->vintf->hyp_own)
+		cmdq->supports_cmd = tegra241_guest_vcmdq_supports_cmd;
+
 	return arm_smmu_cmdq_init(smmu, cmdq);
 }