author		Oliver Upton <oliver.upton@linux.dev>	2024-10-08 01:30:28 +0200
committer	Marc Zyngier <maz@kernel.org>	2024-10-08 11:40:27 +0200
commit		79cc6cdb932a5cf1a1ee05f6de12a7d102818d21 (patch)
tree		f41d3cb833f797542ad4e6d1e245f682ce3a1fc6	/arch/arm64/kvm/sys_regs.c
parent		KVM: arm64: nv: Punt stage-2 recycling to a vCPU request (diff)
KVM: arm64: nv: Clarify safety of allowing TLBI unmaps to reschedule
There's been a decent amount of attention around unmaps of nested MMUs,
and TLBI handling is no exception to this. Add a comment clarifying why
it is safe to reschedule during a TLBI unmap, even without a reference
on the MMU in progress.

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20241007233028.2236133-5-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
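For context on what "reschedule during a TLBI unmap" means in practice: the unmap path walks the range in chunks and may yield the MMU write lock between chunks when may_block is set. Below is a minimal sketch of that pattern, assuming the chunked-walk shape used by the arm64 stage-2 code; sketch_unmap_resched() and the PUD-sized chunking are illustrative, not the actual mmu.c implementation, though cond_resched_rwlock_write() and lockdep_assert_held_write() are real kernel APIs.

/*
 * Illustrative sketch only: shows how a stage-2 unmap can drop the MMU
 * write lock mid-walk when may_block is set. The helper name and the
 * chunk size are hypothetical.
 */
static void sketch_unmap_resched(struct kvm *kvm, struct kvm_s2_mmu *mmu,
				 phys_addr_t start, u64 size, bool may_block)
{
	phys_addr_t addr = start, end = start + size;

	lockdep_assert_held_write(&kvm->mmu_lock);

	while (addr < end) {
		phys_addr_t next = min(ALIGN(addr + 1, PUD_SIZE), end);

		/* ... tear down stage-2 mappings in [addr, next) ... */

		/*
		 * Yielding the lock here is the window in which @mmu may
		 * be recycled for a different guest context; the comment
		 * added by this patch explains why that is safe.
		 */
		if (may_block && next != end)
			cond_resched_rwlock_write(&kvm->mmu_lock);

		addr = next;
	}
}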
Diffstat (limited to '')
 arch/arm64/kvm/sys_regs.c | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index edd385baf270..1d84bc3396cb 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2989,6 +2989,29 @@ union tlbi_info {
 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
 			       const union tlbi_info *info)
 {
+	/*
+	 * The unmap operation is allowed to drop the MMU lock and block, which
+	 * means that @mmu could be used for a different context than the one
+	 * currently being invalidated.
+	 *
+	 * This behavior is still safe, as:
+	 *
+	 *  1) The vCPU(s) that recycled the MMU are responsible for invalidating
+	 *     the entire MMU before reusing it, which still honors the intent
+	 *     of a TLBI.
+	 *
+	 *  2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
+	 *     and ERET to the guest), other vCPUs are allowed to use stale
+	 *     translations.
+	 *
+	 *  3) Accidentally unmapping an unrelated MMU context is nonfatal, and
+	 *     at worst may cause more aborts for shadow stage-2 fills.
+	 *
+	 * Dropping the MMU lock also implies that shadow stage-2 fills could
+	 * happen behind the back of the TLBI. This is still safe, though, as
+	 * the L1 needs to put its stage-2 in a consistent state before doing
+	 * the TLBI.
+	 */
 	kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
 }
@@ -3084,6 +3107,10 @@ static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
 	max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
 	base_addr &= ~(max_size - 1);
 
+	/*
+	 * See comment in s2_mmu_unmap_range() for why this is allowed to
+	 * reschedule.
+	 */
 	kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
 }