author		Paolo Bonzini <pbonzini@redhat.com>	2019-04-17 15:28:44 +0200
committer	Paolo Bonzini <pbonzini@redhat.com>	2019-04-30 21:22:15 +0200
commit		76d58e0f07ec203bbdfcaabd9a9fc10a5a3ed5ea (patch)
tree		8cf55a1022132806b63cceb24b286514c2210e34 /tools/testing/selftests/kvm/dirty_log_test.c
parent		x86/kvm/mmu: reset MMU context when 32-bit guest switches PAE (diff)
KVM: fix KVM_CLEAR_DIRTY_LOG for memory slots of unaligned size
If a memory slot's size is not a multiple of 64 pages (256K), then
the KVM_CLEAR_DIRTY_LOG API is unusable: clearing the final 64 pages
either requires the requested page range to go beyond memslot->npages,
or requires log->num_pages to be unaligned, and
kvm_clear_dirty_log_protect requires log->num_pages to be both in
range and aligned.

To allow this case, allow log->num_pages not to be a multiple of 64
if it ends exactly on the last page of the slot.

Reported-by: Peter Xu <peterx@redhat.com>
Fixes: 98938aa8edd6 ("KVM: validate userspace input in kvm_clear_dirty_log_protect()", 2019-01-02)
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
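A minimal, stand-alone sketch of the relaxed rule described above (the
helper name and plain-integer parameters are hypothetical; the real
check lives inside kvm_clear_dirty_log_protect and operates on the
memslot and struct kvm_clear_dirty_log):

#include <stdbool.h>
#include <stdint.h>

/*
 * Sketch only, not the literal patch hunk: first_page must stay
 * 64-page aligned and the range must fit inside the slot, but
 * num_pages may now be unaligned when the requested range ends
 * exactly on the slot's last page.
 */
static bool clear_range_valid(uint64_t slot_npages,
                              uint64_t first_page, uint64_t num_pages)
{
        if (first_page & 63)
                return false;
        if (first_page > slot_npages || num_pages > slot_npages - first_page)
                return false;
        if ((num_pages & 63) && num_pages != slot_npages - first_page)
                return false;
        return true;
}

For example, with a 16387-page slot, clearing first_page=16384 and
num_pages=3 is now accepted because the range ends on the slot's final
page, even though 3 is not a multiple of 64.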
Diffstat (limited to 'tools/testing/selftests/kvm/dirty_log_test.c')
-rw-r--r--	tools/testing/selftests/kvm/dirty_log_test.c	9
1 file changed, 6 insertions, 3 deletions
diff --git a/tools/testing/selftests/kvm/dirty_log_test.c b/tools/testing/selftests/kvm/dirty_log_test.c
index 4715cfba20dc..93f99c6b7d79 100644
--- a/tools/testing/selftests/kvm/dirty_log_test.c
+++ b/tools/testing/selftests/kvm/dirty_log_test.c
@@ -288,8 +288,11 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
#endif
max_gfn = (1ul << (guest_pa_bits - guest_page_shift)) - 1;
guest_page_size = (1ul << guest_page_shift);
- /* 1G of guest page sized pages */
- guest_num_pages = (1ul << (30 - guest_page_shift));
+ /*
+ * A little more than 1G of guest page sized pages. Cover the
+ * case where the size is not aligned to 64 pages.
+ */
+ guest_num_pages = (1ul << (30 - guest_page_shift)) + 3;
host_page_size = getpagesize();
host_num_pages = (guest_num_pages * guest_page_size) / host_page_size +
!!((guest_num_pages * guest_page_size) % host_page_size);
@@ -359,7 +362,7 @@ static void run_test(enum vm_guest_mode mode, unsigned long iterations,
kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
#ifdef USE_CLEAR_DIRTY_LOG
kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
- DIV_ROUND_UP(host_num_pages, 64) * 64);
+ host_num_pages);
#endif
vm_dirty_log_verify(bmap);
iteration++;
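For context, a hedged sketch of the userspace call shape the test now
exercises: the whole slot is cleared in one KVM_CLEAR_DIRTY_LOG call,
so num_pages may be unaligned but ends on the slot's last page. The
helper name and parameters are placeholders, and the VM must already
have manual dirty-log clearing enabled for the ioctl to be available.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/*
 * Hypothetical helper: clear the dirty state of every page in one
 * memory slot. num_pages need not be a multiple of 64 here because
 * the range ends exactly on the slot's last page, which is the case
 * this commit allows.
 */
static int clear_whole_slot(int vm_fd, uint32_t slot, uint32_t slot_npages,
                            uint64_t *dirty_bitmap)
{
        struct kvm_clear_dirty_log clear = {
                .slot = slot,
                .num_pages = slot_npages,
                .first_page = 0,
                .dirty_bitmap = dirty_bitmap,
        };

        return ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear);
}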