author    | Sean Christopherson <seanjc@google.com> | 2024-06-28 02:55:56 +0200
committer | Sean Christopherson <seanjc@google.com> | 2024-06-28 17:44:03 +0200
commit    | 5bb9af07d37c95ac83725ed0f92c2a4315ade0ae (patch)
tree      | f7e248ab9eb3f1921d756b1f65a80fb441d27b2b
parent    | KVM: selftests: Print the seed for the guest pRNG iff it has changed (diff)
KVM: selftests: Rework macros in PMU counters test to prep for multi-insn loop
Tweak the macros in the PMU counters test to prepare for moving the
CLFLUSH+MFENCE instructions into the loop body, to fix an issue where
a single CLFLUSH doesn't guarantee an LLC miss.
Link: https://lore.kernel.org/r/20240628005558.3835480-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
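
As a rough sketch (not part of the patch), the reworked macro arithmetic can be sanity-checked in isolation: with a one-instruction loop body, the measured section retires NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS = 10 * 1 + 7 = 17 instructions, and a multi-instruction loop body presumably only needs NUM_INSNS_PER_LOOP (and NUM_EXTRA_INSNS) adjusted. The macro names below mirror the patch; the main() harness is purely illustrative.

/*
 * Illustrative sketch, not from the patch: verify the reworked macro
 * arithmetic.  Presumably the follow-up that moves CLFLUSH+MFENCE into
 * the loop body bumps NUM_INSNS_PER_LOOP and shrinks NUM_EXTRA_INSNS.
 */
#include <assert.h>

#define NUM_LOOPS			10
#define NUM_INSNS_PER_LOOP		1	/* just the LOOP instruction for now */
#define NUM_BRANCH_INSNS_RETIRED	(NUM_LOOPS)
#define NUM_EXTRA_INSNS			7	/* CLFLUSH/CLFLUSHOPT/NOP, MFENCE, 2 MOV, 2 XOR, WRMSR */
#define NUM_INSNS_RETIRED		(NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)

int main(void)
{
	/* 10 iterations * 1 instruction + 7 setup/teardown instructions. */
	assert(NUM_INSNS_RETIRED == 17);
	assert(NUM_BRANCH_INSNS_RETIRED == 10);
	return 0;
}
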
-rw-r--r-- | tools/testing/selftests/kvm/x86_64/pmu_counters_test.c | 28
1 file changed, 19 insertions(+), 9 deletions(-)
diff --git a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
index 96446134c00b..bb40d7c0f83e 100644
--- a/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
+++ b/tools/testing/selftests/kvm/x86_64/pmu_counters_test.c
@@ -7,15 +7,25 @@
 #include "pmu.h"
 #include "processor.h"
 
-/* Number of LOOP instructions for the guest measurement payload. */
-#define NUM_BRANCHES		10
+/* Number of iterations of the loop for the guest measurement payload. */
+#define NUM_LOOPS		10
+
+/* Each iteration of the loop retires one branch instruction. */
+#define NUM_BRANCH_INSNS_RETIRED	(NUM_LOOPS)
+
+/* Number of instructions in each loop. */
+#define NUM_INSNS_PER_LOOP	1
+
 /*
  * Number of "extra" instructions that will be counted, i.e. the number of
- * instructions that are needed to set up the loop and then disabled the
+ * instructions that are needed to set up the loop and then disable the
  * counter. 1 CLFLUSH/CLFLUSHOPT/NOP, 1 MFENCE, 2 MOV, 2 XOR, 1 WRMSR.
  */
-#define NUM_EXTRA_INSNS		7
-#define NUM_INSNS_RETIRED	(NUM_BRANCHES + NUM_EXTRA_INSNS)
+#define NUM_EXTRA_INSNS		7
+
+/* Total number of instructions retired within the measured section. */
+#define NUM_INSNS_RETIRED	(NUM_LOOPS * NUM_INSNS_PER_LOOP + NUM_EXTRA_INSNS)
+
 static uint8_t kvm_pmu_version;
 static bool kvm_has_perf_caps;
@@ -100,7 +110,7 @@ static void guest_assert_event_count(uint8_t idx,
 		GUEST_ASSERT_EQ(count, NUM_INSNS_RETIRED);
 		break;
 	case INTEL_ARCH_BRANCHES_RETIRED_INDEX:
-		GUEST_ASSERT_EQ(count, NUM_BRANCHES);
+		GUEST_ASSERT_EQ(count, NUM_BRANCH_INSNS_RETIRED);
 		break;
 	case INTEL_ARCH_LLC_REFERENCES_INDEX:
 	case INTEL_ARCH_LLC_MISSES_INDEX:
@@ -120,7 +130,7 @@ static void guest_assert_event_count(uint8_t idx,
 	}
 
 sanity_checks:
-	__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+	__asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
 	GUEST_ASSERT_EQ(_rdpmc(pmc), count);
 
 	wrmsr(pmc_msr, 0xdead);
@@ -147,7 +157,7 @@ do {									\
 	__asm__ __volatile__("wrmsr\n\t"				\
 			     clflush "\n\t"				\
 			     "mfence\n\t"				\
-			     "1: mov $" __stringify(NUM_BRANCHES) ", %%ecx\n\t" \
+			     "1: mov $" __stringify(NUM_LOOPS) ", %%ecx\n\t" \
 			     FEP "loop .\n\t"				\
 			     FEP "mov %%edi, %%ecx\n\t"			\
 			     FEP "xor %%eax, %%eax\n\t"			\
@@ -500,7 +510,7 @@ static void guest_test_fixed_counters(void)
 		wrmsr(MSR_CORE_PERF_FIXED_CTR0 + i, 0);
 		wrmsr(MSR_CORE_PERF_FIXED_CTR_CTRL, FIXED_PMC_CTRL(i, FIXED_PMC_KERNEL));
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, FIXED_PMC_GLOBAL_CTRL_ENABLE(i));
-		__asm__ __volatile__("loop ." : "+c"((int){NUM_BRANCHES}));
+		__asm__ __volatile__("loop ." : "+c"((int){NUM_LOOPS}));
 		wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0);
 
 		val = rdmsr(MSR_CORE_PERF_FIXED_CTR0 + i);
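
A side note on the measurement idiom visible in the hunks above: "loop ." is a single x86 instruction that decrements ECX and branches back to its own address while ECX is non-zero, and the "+c" constraint binds the C operand to ECX. The standalone snippet below is an illustration only, not taken from the test, and assumes x86-64 with GCC/Clang inline asm.

/*
 * Standalone illustration of the "loop ." idiom: the single instruction
 * spins NUM_LOOPS times and thus retires NUM_LOOPS branch instructions.
 */
#include <stdio.h>

#define NUM_LOOPS	10

int main(void)
{
	int iters = NUM_LOOPS;	/* "+c" binds this in/out operand to ECX */

	__asm__ __volatile__("loop ." : "+c"(iters));

	printf("ECX after the loop: %d\n", iters);	/* prints 0 */
	return 0;
}
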