author | Colton Lewis <coltonlewis@google.com> | 2022-11-07 19:22:07 +0100
committer | Sean Christopherson <seanjc@google.com> | 2022-11-16 19:57:19 +0100
commit | 6864c6442f4dfa02c7cf48199cf3ea6bb1fe74ed
tree | 53f27995f75db67791895886364121aa6480740f /tools/testing/selftests/kvm/lib/perf_test_util.c
parent | KVM: selftests: create -r argument to specify random seed
KVM: selftests: randomize which pages are written vs read
Randomize which pages are written vs read using the random number
generator.
Rename the variable wr_fract and its associated function calls to
write_percent, which now operates as a percentage from 0 to 100: a value
of X means each page has an X% chance of being written. Change the -f
argument to -w to reflect the new semantics. Keep the same default of
100% writes.
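For illustration, the new semantics boil down to a uniform draw in [0, 99]
compared against write_percent. The sketch below is not the selftest code:
the xorshift32 generator and the names prng_u32() and should_write_page()
are stand-ins for the selftest's guest_random_u32() helper; only the
"% 100 < write_percent" comparison mirrors the change in the diff.

```c
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in PRNG state; the selftest uses struct guest_random_state. */
struct prng_state { uint32_t seed; };

/* xorshift32 (Marsaglia), standing in for guest_random_u32(). */
static uint32_t prng_u32(struct prng_state *s)
{
	s->seed ^= s->seed << 13;
	s->seed ^= s->seed >> 17;
	s->seed ^= s->seed << 5;
	return s->seed;
}

/* A page is written when a draw in [0, 99] falls below write_percent. */
static bool should_write_page(struct prng_state *s, uint32_t write_percent)
{
	return (prng_u32(s) % 100) < write_percent;
}

int main(void)
{
	struct prng_state s = { .seed = 1 };	/* must be nonzero for xorshift32 */
	uint64_t writes = 0, pages = 1000000;

	for (uint64_t i = 0; i < pages; i++)
		writes += should_write_page(&s, 75);

	/* Expect roughly 750000 writes, i.e. ~75% of pages. */
	printf("wrote %" PRIu64 " of %" PRIu64 " pages\n", writes, pages);
	return 0;
}
```

Unlike the old wr_fract scheme, where every Nth page was written
deterministically, the percentage check randomizes which pages are written
while keeping the expected write fraction at write_percent.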
Population always uses 100% writes to ensure all memory is actually
populated and not just mapped to the zero page. This prevents expensive
copy-on-write faults from occurring during the dirty memory iterations
that follow, which would pollute the performance results.
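The population pass is driven by the individual tests rather than by this
file, so the following is only a hypothetical caller-side sketch of that
ordering: run_vcpus_once() is a made-up placeholder for a test's own run
loop, while perf_test_set_write_percent() is the helper changed in the diff
below.

```c
#include <stdint.h>

struct kvm_vm;						/* from the selftest library */
void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent);
void run_vcpus_once(struct kvm_vm *vm);			/* made-up placeholder */

/* Hypothetical ordering (assumed, not part of this patch). */
static void populate_then_measure(struct kvm_vm *vm, uint32_t write_percent)
{
	/* Populate with 100% writes so no page stays on the zero page. */
	perf_test_set_write_percent(vm, 100);
	run_vcpus_once(vm);

	/* Then measure with the user-requested read/write mix. */
	perf_test_set_write_percent(vm, write_percent);
	run_vcpus_once(vm);
}
```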
Each vCPU calculates its own random seed by adding its index to the
seed provided.
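A short sketch of what that buys, reusing the xorshift32 stand-in from the
earlier example (the seed value and vCPU count here are illustrative):
seeding each vCPU's generator with seed + vcpu_idx gives every vCPU its own
random stream while keeping the run reproducible for a given seed.

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct prng_state { uint32_t seed; };

/* Same xorshift32 stand-in as above (illustrative only). */
static uint32_t prng_u32(struct prng_state *s)
{
	s->seed ^= s->seed << 13;
	s->seed ^= s->seed >> 17;
	s->seed ^= s->seed << 5;
	return s->seed;
}

int main(void)
{
	uint32_t base_seed = 0x1234;	/* e.g. the value passed via -r */

	/* Mirror the guest code: vCPU i seeds its generator with seed + i. */
	for (uint32_t vcpu_idx = 0; vcpu_idx < 4; vcpu_idx++) {
		struct prng_state s = { .seed = base_seed + vcpu_idx };

		printf("vcpu %" PRIu32 " first draw: %" PRIu32 "\n",
		       vcpu_idx, prng_u32(&s));
	}
	return 0;
}
```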
Signed-off-by: Colton Lewis <coltonlewis@google.com>
Reviewed-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20221107182208.479157-4-coltonlewis@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Diffstat (limited to 'tools/testing/selftests/kvm/lib/perf_test_util.c')
-rw-r--r-- | tools/testing/selftests/kvm/lib/perf_test_util.c | 13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/tools/testing/selftests/kvm/lib/perf_test_util.c b/tools/testing/selftests/kvm/lib/perf_test_util.c
index d48ee4f604f0..15000f71cdee 100644
--- a/tools/testing/selftests/kvm/lib/perf_test_util.c
+++ b/tools/testing/selftests/kvm/lib/perf_test_util.c
@@ -48,10 +48,13 @@ void perf_test_guest_code(uint32_t vcpu_idx)
 {
 	struct perf_test_args *pta = &perf_test_args;
 	struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_idx];
+	struct guest_random_state rand_state;
 	uint64_t gva;
 	uint64_t pages;
 	int i;
 
+	rand_state = new_guest_random_state(pta->random_seed + vcpu_idx);
+
 	gva = vcpu_args->gva;
 	pages = vcpu_args->pages;
 
@@ -62,7 +65,7 @@ void perf_test_guest_code(uint32_t vcpu_idx)
 	for (i = 0; i < pages; i++) {
 		uint64_t addr = gva + (i * pta->guest_page_size);
 
-		if (i % pta->wr_fract == 0)
+		if (guest_random_u32(&rand_state) % 100 < pta->write_percent)
 			*(uint64_t *)addr = 0x0123456789ABCDEF;
 		else
 			READ_ONCE(*(uint64_t *)addr);
@@ -123,7 +126,7 @@ struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int nr_vcpus,
 	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));
 
 	/* By default vCPUs will write to memory. */
-	pta->wr_fract = 1;
+	pta->write_percent = 100;
 
 	/*
 	 * Snapshot the non-huge page size. This is used by the guest code to
@@ -225,10 +228,10 @@ void perf_test_destroy_vm(struct kvm_vm *vm)
 	kvm_vm_free(vm);
 }
 
-void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
+void perf_test_set_write_percent(struct kvm_vm *vm, uint32_t write_percent)
 {
-	perf_test_args.wr_fract = wr_fract;
-	sync_global_to_guest(vm, perf_test_args);
+	perf_test_args.write_percent = write_percent;
+	sync_global_to_guest(vm, perf_test_args.write_percent);
 }
 
 void perf_test_set_random_seed(struct kvm_vm *vm, uint32_t random_seed)