author		Sean Christopherson <seanjc@google.com>	2024-08-09 21:43:23 +0200
committer	Sean Christopherson <seanjc@google.com>	2024-09-10 05:22:02 +0200
commit		5b1fb116e1a636701627a6eb202d17be93e8f7a8
tree		0c9a1868172fb59ef9301ac89fd11b8b152e3c82 /arch/x86/kvm/mmu/mmu.c
parent		KVM: x86/mmu: Move walk_slot_rmaps() up near for_each_slot_rmap_range()
KVM: x86/mmu: Plumb a @can_yield parameter into __walk_slot_rmaps()
Add a @can_yield param to __walk_slot_rmaps() to control whether or not
dropping mmu_lock and conditionally rescheduling is allowed.  This will
allow using __walk_slot_rmaps() and thus cond_resched() to handle
mmu_notifier invalidations, which usually allow blocking/yielding, but
not when invoked by the OOM killer.

Link: https://lore.kernel.org/r/20240809194335.1726916-12-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
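For illustration only (not part of this patch), a minimal sketch of how a
caller servicing an mmu_notifier invalidation might forward its may-block
state through the new @can_yield parameter; the helper name
example_zap_range and its may_block argument are hypothetical, while the
__walk_slot_rmaps() argument order follows the signature introduced below:

	static bool example_zap_range(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end, bool may_block)
	{
		/*
		 * Pass the notifier's may-block state as @can_yield so that
		 * __walk_slot_rmaps() only drops mmu_lock and calls
		 * cond_resched() when yielding is actually allowed, e.g. not
		 * when the invalidation comes from the OOM killer.
		 */
		return __walk_slot_rmaps(kvm, slot, __kvm_zap_rmap,
					 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
					 start, end - 1,
					 may_block /* can_yield */,
					 true /* flush_on_yield */,
					 false /* flush */);
	}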
Diffstat (limited to 'arch/x86/kvm/mmu/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu/mmu.c	12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 17edf1499be7..e3adc934559d 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1526,7 +1526,8 @@ static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
slot_rmaps_handler fn,
int start_level, int end_level,
gfn_t start_gfn, gfn_t end_gfn,
- bool flush_on_yield, bool flush)
+ bool can_yield, bool flush_on_yield,
+ bool flush)
{
struct slot_rmap_walk_iterator iterator;
@@ -1537,6 +1538,9 @@ static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
if (iterator.rmap)
flush |= fn(kvm, iterator.rmap, slot);
+ if (!can_yield)
+ continue;
+
if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
if (flush && flush_on_yield) {
kvm_flush_remote_tlbs_range(kvm, start_gfn,
@@ -1558,7 +1562,7 @@ static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
{
return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
slot->base_gfn, slot->base_gfn + slot->npages - 1,
- flush_on_yield, false);
+ true, flush_on_yield, false);
}
static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
@@ -6600,7 +6604,7 @@ static bool kvm_rmap_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_e
flush = __walk_slot_rmaps(kvm, memslot, __kvm_zap_rmap,
PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
- start, end - 1, true, flush);
+ start, end - 1, true, true, flush);
}
}
@@ -6888,7 +6892,7 @@ static void kvm_shadow_mmu_try_split_huge_pages(struct kvm *kvm,
*/
for (level = KVM_MAX_HUGEPAGE_LEVEL; level > target_level; level--)
__walk_slot_rmaps(kvm, slot, shadow_mmu_try_split_huge_pages,
- level, level, start, end - 1, true, false);
+ level, level, start, end - 1, true, true, false);
}
/* Must be called with the mmu_lock held in write-mode. */