author    Paolo Bonzini <pbonzini@redhat.com>    2024-11-13 12:31:54 +0100
committer Paolo Bonzini <pbonzini@redhat.com>    2024-11-13 12:31:54 +0100
commitc59de1413391868057cfe70b45dbce66de643064 (patch)
tree6a98cce8dd632ba6d06a5d56e78344b6823ec032 /virt/kvm
parentMerge tag 'kvm-x86-generic-6.13' of https://github.com/kvm-x86/linux into HEAD (diff)
parentKVM: x86/mmu: Drop per-VM zapped_obsolete_pages list (diff)
Merge tag 'kvm-x86-mmu-6.13' of https://github.com/kvm-x86/linux into HEAD
KVM x86 MMU changes for 6.13

- Cleanup KVM's handling of Accessed and Dirty bits to dedup code, improve
  documentation, harden against unexpected changes, and to simplify
  A/D-disabled MMUs by using the hardware-defined A/D bits to track if a PFN
  is Accessed and/or Dirty.

- Elide TLB flushes when aging SPTEs, as has been done in x86's primary MMU
  for over 10 years (see the sketch after this list).

- Batch TLB flushes when zapping collapsible TDP MMU SPTEs, i.e. when dirty
  logging is toggled off, which reduces the time it takes to disable dirty
  logging by ~3x.

- Recover huge pages in-place in the TDP MMU instead of zapping the SP and
  waiting until the page is re-accessed to create a huge mapping.
  Proactively installing huge pages can reduce vCPU jitter in extreme
  scenarios.

- Remove support for (poorly) reclaiming page tables in shadow MMUs via the
  primary MMU's shrinker interface.
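Why eliding the flush on aging is safe: clearing an Accessed bit is purely
advisory, so a vCPU that still holds a stale TLB entry at worst re-reports
the page as young one extra time; nothing is lost or corrupted. A minimal C
sketch of the idea, with hypothetical names (ACCESSED_BIT, age_spte(),
remote_tlb_flush()) used for illustration, not KVM's actual API:

#include <stdbool.h>
#include <stdint.h>

#define ACCESSED_BIT (1ULL << 5)	/* hypothetical A-bit position */

static void remote_tlb_flush(void)
{
	/* stand-in for a real remote TLB shootdown */
}

/* Test-and-clear the Accessed bit; flush only if the caller insists. */
static bool age_spte(uint64_t *sptep, bool flush_on_young)
{
	bool young = *sptep & ACCESSED_BIT;

	if (young) {
		*sptep &= ~ACCESSED_BIT;
		/*
		 * Skipping the flush leaves stale translations cached, so
		 * hardware may not re-set the A-bit immediately; the page
		 * is merely over-reported as young once.
		 */
		if (flush_on_young)
			remote_tlb_flush();
	}
	return young;
}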
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/Kconfig     |  4
-rw-r--r--  virt/kvm/kvm_main.c  | 20
2 files changed, 10 insertions(+), 14 deletions(-)
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index fd6a3010afa8..54e959e7d68f 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -100,6 +100,10 @@ config KVM_GENERIC_MMU_NOTIFIER
select MMU_NOTIFIER
bool
+config KVM_ELIDE_TLB_FLUSH_IF_YOUNG
+ depends on KVM_GENERIC_MMU_NOTIFIER
+ bool
+
config KVM_GENERIC_MEMORY_ATTRIBUTES
depends on KVM_GENERIC_MMU_NOTIFIER
bool
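The new symbol has no prompt, so the generic code alone never enables it; an
architecture must opt in by selecting it from its own Kconfig. A sketch of
that opt-in (the KVM_X86 placement is an assumption here, modeled on how
other selectable KVM symbols are wired up):

config KVM_X86
	...
	select KVM_ELIDE_TLB_FLUSH_IF_YOUNG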
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ec6fc8164f66..27186b06518a 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -631,7 +631,8 @@ mmu_unlock:
static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
- gfn_handler_t handler)
+ gfn_handler_t handler,
+ bool flush_on_ret)
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
const struct kvm_mmu_notifier_range range = {
@@ -639,7 +640,7 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
.end = end,
.handler = handler,
.on_lock = (void *)kvm_null_fn,
- .flush_on_ret = true,
+ .flush_on_ret = flush_on_ret,
.may_block = false,
};
@@ -651,17 +652,7 @@ static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn
unsigned long end,
gfn_handler_t handler)
{
- struct kvm *kvm = mmu_notifier_to_kvm(mn);
- const struct kvm_mmu_notifier_range range = {
- .start = start,
- .end = end,
- .handler = handler,
- .on_lock = (void *)kvm_null_fn,
- .flush_on_ret = false,
- .may_block = false,
- };
-
- return __kvm_handle_hva_range(kvm, &range).ret;
+ return kvm_handle_hva_range(mn, start, end, handler, false);
}
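The wrapper is now a pure alias for the flush_on_ret=false case; the flush
itself lives in the common __kvm_handle_hva_range() loop. A simplified
sketch of that decision (not the verbatim function, which also iterates
memslots under mmu_lock):

	if (range->flush_on_ret && ret)
		kvm_flush_remote_tlbs(kvm);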
void kvm_mmu_invalidate_begin(struct kvm *kvm)
@@ -826,7 +817,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
{
trace_kvm_age_hva(start, end);
- return kvm_handle_hva_range(mn, start, end, kvm_age_gfn);
+ return kvm_handle_hva_range(mn, start, end, kvm_age_gfn,
+ !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
}
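IS_ENABLED() expands to a compile-time constant (1 if the option is built in
or modular, 0 otherwise), and kvm_handle_hva_range() is __always_inline, so
the extra parameter costs nothing at runtime. On a kernel whose architecture
selects CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG, the call above compiles down to
the equivalent of:

	return kvm_handle_hva_range(mn, start, end, kvm_age_gfn, false);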
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,