From 7b424ffcd45821600312ed4c794c21f7805e79d4 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 13 Sep 2023 16:56:44 +0000 Subject: KVM: arm64: Don't use kerneldoc comment for arm64_check_features() A double-asterisk opening mark to the comment (i.e. '/**') indicates that a comment block is in the kerneldoc format. There's automation in place to validate that kerneldoc blocks actually adhere to the formatting rules. The function comment for arm64_check_features() isn't kerneldoc; use a 'regular' comment to silence automation warnings. Link: https://lore.kernel.org/all/202309112251.e25LqfcK-lkp@intel.com/ Reviewed-by: Zenghui Yu Link: https://lore.kernel.org/r/20230913165645.2319017-1-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e92ec810d449..818a52e257ed 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1228,7 +1228,7 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp, return arm64_ftr_safe_value(&kvm_ftr, new, cur); } -/** +/* * arm64_check_features() - Check if a feature register value constitutes * a subset of features indicated by the idreg's KVM sanitised limit. * -- cgit v1.2.3 From ef150908b6bd80a54126dbec324bd63a24a5628a Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 19:50:29 +0000 Subject: KVM: arm64: Add generic check for system-supported vCPU features MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To date KVM has relied on kvm_reset_vcpu() failing when the vCPU feature flags are unsupported by the system. This is a bit messy since kvm_reset_vcpu() is called at runtime outside of the KVM_ARM_VCPU_INIT ioctl when it is expected to succeed. Further complicating the matter is that kvm_reset_vcpu() isn't consistently called with the config_lock held, and must tolerate both locking states. Prepare to move feature compatibility checks out of kvm_reset_vcpu() with a 'generic' check that compares the user-provided flags with a computed maximum feature set for the system. 
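In userspace terms, the end state is that an unsupported feature bit fails uniformly at KVM_ARM_VCPU_INIT time. A minimal sketch of the caller's side (illustrative only, not part of the patch; fd setup and error handling are assumed):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	static int init_vcpu_32bit(int vm_fd, int vcpu_fd)
	{
		struct kvm_vcpu_init init;

		/* Let KVM pick the preferred target; features[] comes back cleared */
		if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init))
			return -1;

		/* Now rejected with EINVAL on systems without 32bit EL1 support */
		init.features[0] |= 1U << KVM_ARM_VCPU_EL1_32BIT;

		return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
	}
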
Reviewed-by: Philippe Mathieu-Daudé Link: https://lore.kernel.org/r/20230920195036.1169791-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 4866b3f7b4ea..8b5094343f84 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1190,6 +1190,16 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, return -EINVAL; } +static unsigned long system_supported_vcpu_features(void) +{ + unsigned long features = KVM_VCPU_VALID_FEATURES; + + if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1)) + clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features); + + return features; +} + static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init) { @@ -1204,12 +1214,12 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, return -ENOENT; } + if (features & ~system_supported_vcpu_features()) + return -EINVAL; + if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features)) return 0; - if (!cpus_have_const_cap(ARM64_HAS_32BIT_EL1)) - return -EINVAL; - /* MTE is incompatible with AArch32 */ if (kvm_has_mte(vcpu->kvm)) return -EINVAL; -- cgit v1.2.3 From 9116db11feb521d9eaa850d3a6a8b713f59c6971 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 19:50:30 +0000 Subject: KVM: arm64: Hoist PMUv3 check into KVM_ARM_VCPU_INIT ioctl handler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Test that the system supports PMUv3 before ever getting to kvm_reset_vcpu(). Reviewed-by: Philippe Mathieu-Daudé Link: https://lore.kernel.org/r/20230920195036.1169791-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 3 +++ arch/arm64/kvm/reset.c | 5 ----- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 8b5094343f84..18cbd3c9a4d7 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1197,6 +1197,9 @@ static unsigned long system_supported_vcpu_features(void) if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1)) clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features); + if (!kvm_arm_support_pmu_v3()) + clear_bit(KVM_ARM_VCPU_PMU_V3, &features); + return features; } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 7a65a35ee4ac..5b5c74cb901d 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -255,11 +255,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) else pstate = VCPU_RESET_PSTATE_EL1; - if (kvm_vcpu_has_pmu(vcpu) && !kvm_arm_support_pmu_v3()) { - ret = -EINVAL; - goto out; - } - /* Reset core registers */ memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu))); memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs)); -- cgit v1.2.3 From be9c0c018389e0722a97ac5cd3152afff1111e37 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 19:50:31 +0000 Subject: KVM: arm64: Hoist SVE check into KVM_ARM_VCPU_INIT ioctl handler Test that the system supports SVE before ever getting to kvm_reset_vcpu(). 
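For completeness, a hedged sketch of the existing userspace SVE flow this slots into (mirroring the documented ABI rather than adding to it; vm_fd/vcpu_fd and the surrounding init structure are assumed): the capability is probed first, and with this patch an unsupported request fails at init rather than surfacing from a later reset:

	int feature = KVM_ARM_VCPU_SVE;

	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0) {
		init.features[0] |= 1U << KVM_ARM_VCPU_SVE;
		if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init))
			return -1;
		/* SVE configuration must be finalized before the vCPU runs */
		if (ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &feature))
			return -1;
	}
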
Link: https://lore.kernel.org/r/20230920195036.1169791-4-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 3 +++ arch/arm64/kvm/reset.c | 14 +++----------- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 18cbd3c9a4d7..e73e134fa2fa 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1200,6 +1200,9 @@ static unsigned long system_supported_vcpu_features(void) if (!kvm_arm_support_pmu_v3()) clear_bit(KVM_ARM_VCPU_PMU_V3, &features); + if (!system_supports_sve()) + clear_bit(KVM_ARM_VCPU_SVE, &features); + return features; } diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 5b5c74cb901d..3cb08d35b8e0 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -73,11 +73,8 @@ int __init kvm_arm_init_sve(void) return 0; } -static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) +static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) { - if (!system_supports_sve()) - return -EINVAL; - vcpu->arch.sve_max_vl = kvm_sve_max_vl; /* @@ -86,8 +83,6 @@ static int kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu) * kvm_arm_vcpu_finalize(), which freezes the configuration. */ vcpu_set_flag(vcpu, GUEST_HAS_SVE); - - return 0; } /* @@ -231,11 +226,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } if (!kvm_arm_vcpu_sve_finalized(vcpu)) { - if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) { - ret = kvm_vcpu_enable_sve(vcpu); - if (ret) - goto out; - } + if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) + kvm_vcpu_enable_sve(vcpu); } else { kvm_vcpu_reset_sve(vcpu); } -- cgit v1.2.3 From baa28a53ddbe2d27377b9a4aeff5eb8b706c8d38 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 19:50:32 +0000 Subject: KVM: arm64: Hoist PAuth checks into KVM_ARM_VCPU_INIT ioctl Test for feature support in the ioctl handler rather than kvm_reset_vcpu(). Continue to uphold our all-or-nothing policy with address and generic pointer authentication. Link: https://lore.kernel.org/r/20230920195036.1169791-5-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 13 +++++++++++++ arch/arm64/kvm/reset.c | 21 +++------------------ 2 files changed, 16 insertions(+), 18 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index e73e134fa2fa..ab866a7370a3 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1203,6 +1203,11 @@ static unsigned long system_supported_vcpu_features(void) if (!system_supports_sve()) clear_bit(KVM_ARM_VCPU_SVE, &features); + if (!system_has_full_ptr_auth()) { + clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features); + clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features); + } + return features; } @@ -1223,6 +1228,14 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, if (features & ~system_supported_vcpu_features()) return -EINVAL; + /* + * For now make sure that both address/generic pointer authentication + * features are requested by the userspace together. 
+ */ + if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) != + test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features)) + return -EINVAL; + if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features)) return 0; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 3cb08d35b8e0..bbcf5bbd66d9 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -165,20 +165,9 @@ static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu) memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu)); } -static int kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) +static void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) { - /* - * For now make sure that both address/generic pointer authentication - * features are requested by the userspace together and the system - * supports these capabilities. - */ - if (!test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || - !test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features) || - !system_has_full_ptr_auth()) - return -EINVAL; - vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH); - return 0; } /** @@ -233,12 +222,8 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || - test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) { - if (kvm_vcpu_enable_ptrauth(vcpu)) { - ret = -EINVAL; - goto out; - } - } + test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) + kvm_vcpu_enable_ptrauth(vcpu); if (vcpu_el1_is_32bit(vcpu)) pstate = VCPU_RESET_PSTATE_SVC; -- cgit v1.2.3 From 12405b09926f0270f7033ed5293241180ea57343 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 19:50:33 +0000 Subject: KVM: arm64: Prevent NV feature flag on systems w/o nested virt It would appear that userspace can select the NV feature flag regardless of whether the system actually supports the feature. Obviously a nested guest isn't getting far in this situation; let's reject the flag instead. Link: https://lore.kernel.org/r/20230920195036.1169791-6-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ab866a7370a3..a791809fb1a1 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1208,6 +1208,9 @@ static unsigned long system_supported_vcpu_features(void) clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features); } + if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT)) + clear_bit(KVM_ARM_VCPU_HAS_EL2, &features); + return features; } -- cgit v1.2.3 From d99fb82fd35e816b3656141e5dd940dfd00d09fd Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 19:50:34 +0000 Subject: KVM: arm64: Hoist NV+SVE check into KVM_ARM_VCPU_INIT ioctl handler Move the feature check out of kvm_reset_vcpu() so we can make the function succeed unconditionally. 
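Condensed from the hunks in this series, the ioctl-time validation ends up with roughly the following shape (a summary sketch only; the pre-existing -ENOENT check for undefined bits and the trailing AArch32 restrictions are elided):

	static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
						const struct kvm_vcpu_init *init)
	{
		unsigned long features = init->features[0];

		/* Reject anything outside the system-supported set */
		if (features & ~system_supported_vcpu_features())
			return -EINVAL;

		/* Address/generic pointer authentication go together */
		if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
		    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
			return -EINVAL;

		/* NV and SVE remain mutually exclusive for now */
		if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) &&
		    test_bit(KVM_ARM_VCPU_SVE, &features))
			return -EINVAL;

		/* ... AArch32-specific restrictions (no MTE, etc.) ... */
	}

leaving kvm_reset_vcpu() itself with no remaining failure paths.
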
Link: https://lore.kernel.org/r/20230920195036.1169791-7-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 5 +++++ arch/arm64/kvm/reset.c | 8 +------- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index a791809fb1a1..995d7d905300 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1239,6 +1239,11 @@ static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu, test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features)) return -EINVAL; + /* Disallow NV+SVE for the time being */ + if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features) && + test_bit(KVM_ARM_VCPU_SVE, &features)) + return -EINVAL; + if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features)) return 0; diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index bbcf5bbd66d9..edffbfab5e7b 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -208,12 +208,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) if (loaded) kvm_arch_vcpu_put(vcpu); - /* Disallow NV+SVE for the time being */ - if (vcpu_has_nv(vcpu) && vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) { - ret = -EINVAL; - goto out; - } - if (!kvm_arm_vcpu_sve_finalized(vcpu)) { if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) kvm_vcpu_enable_sve(vcpu); @@ -267,7 +261,7 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) /* Reset timer */ ret = kvm_timer_vcpu_reset(vcpu); -out: + if (loaded) kvm_arch_vcpu_load(vcpu, smp_processor_id()); preempt_enable(); -- cgit v1.2.3 From 3d4b2a4cddd783bc5a75585a7cb6189a8a551b22 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 19:50:35 +0000 Subject: KVM: arm64: Remove unused return value from kvm_reset_vcpu() Get rid of the return value for kvm_reset_vcpu() as there are no longer any cases where it returns a nonzero value. 
Link: https://lore.kernel.org/r/20230920195036.1169791-8-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/kvm/arch_timer.c | 4 +--- arch/arm64/kvm/arm.c | 10 ++++------ arch/arm64/kvm/reset.c | 6 ++---- include/kvm/arm_arch_timer.h | 2 +- 5 files changed, 9 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index af06ccb7ee34..cb2cde7b2682 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -78,7 +78,7 @@ extern unsigned int __ro_after_init kvm_sve_max_vl; int __init kvm_arm_init_sve(void); u32 __attribute_const__ kvm_target_cpu(void); -int kvm_reset_vcpu(struct kvm_vcpu *vcpu); +void kvm_reset_vcpu(struct kvm_vcpu *vcpu); void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu); struct kvm_hyp_memcache { diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 6dcdae4d38cb..74d7d57ba6fc 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -943,7 +943,7 @@ void kvm_timer_sync_user(struct kvm_vcpu *vcpu) unmask_vtimer_irq_user(vcpu); } -int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) +void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); struct timer_map map; @@ -987,8 +987,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) soft_timer_cancel(&map.emul_vtimer->hrtimer); if (map.emul_ptimer) soft_timer_cancel(&map.emul_ptimer->hrtimer); - - return 0; } static void timer_context_init(struct kvm_vcpu *vcpu, int timerid) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 995d7d905300..ac30c4f3d55c 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1282,15 +1282,12 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); /* Now we know what it is, we can reset it. */ - ret = kvm_reset_vcpu(vcpu); - if (ret) { - bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); - goto out_unlock; - } + kvm_reset_vcpu(vcpu); bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); vcpu_set_flag(vcpu, VCPU_INITIALIZED); + ret = 0; out_unlock: mutex_unlock(&kvm->arch.config_lock); return ret; @@ -1315,7 +1312,8 @@ static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, if (kvm_vcpu_init_changed(vcpu, init)) return -EINVAL; - return kvm_reset_vcpu(vcpu); + kvm_reset_vcpu(vcpu); + return 0; } static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index edffbfab5e7b..96ef9b7e74d4 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -188,10 +188,9 @@ static void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu) * disable preemption around the vcpu reset as we would otherwise race with * preempt notifiers which also call put/load. 
*/ -int kvm_reset_vcpu(struct kvm_vcpu *vcpu) +void kvm_reset_vcpu(struct kvm_vcpu *vcpu) { struct vcpu_reset_state reset_state; - int ret; bool loaded; u32 pstate; @@ -260,12 +259,11 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu) } /* Reset timer */ - ret = kvm_timer_vcpu_reset(vcpu); + kvm_timer_vcpu_reset(vcpu); if (loaded) kvm_arch_vcpu_load(vcpu, smp_processor_id()); preempt_enable(); - return ret; } u32 get_kvm_ipa_limit(void) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index bb3cb005873e..8adf09dbc473 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -94,7 +94,7 @@ struct arch_timer_cpu { int __init kvm_timer_hyp_init(bool has_gic); int kvm_timer_enable(struct kvm_vcpu *vcpu); -int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); +void kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); void kvm_timer_sync_user(struct kvm_vcpu *vcpu); bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From 1de10b7d13a971f9ab90c5ed300b76d6e0c0db38 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 19:50:36 +0000 Subject: KVM: arm64: Get rid of vCPU-scoped feature bitmap The vCPU-scoped feature bitmap was left in place a couple of releases ago in case the change to VM-scoped vCPU features broke anyone. Nobody has complained and the interop between VM and vCPU bitmaps is pretty gross. Throw it out. Link: https://lore.kernel.org/r/20230920195036.1169791-9-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_emulate.h | 13 ++++++------- arch/arm64/include/asm/kvm_host.h | 3 --- arch/arm64/include/asm/kvm_nested.h | 3 ++- arch/arm64/kvm/arm.c | 9 ++++----- arch/arm64/kvm/hypercalls.c | 2 +- arch/arm64/kvm/reset.c | 6 +++--- include/kvm/arm_pmu.h | 2 +- include/kvm/arm_psci.h | 2 +- 8 files changed, 18 insertions(+), 22 deletions(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 3d6725ff0bf6..965b4cd8c247 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -54,6 +54,11 @@ void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu); int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2); int kvm_inject_nested_irq(struct kvm_vcpu *vcpu); +static inline bool vcpu_has_feature(const struct kvm_vcpu *vcpu, int feature) +{ + return test_bit(feature, vcpu->kvm->arch.vcpu_features); +} + #if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__) static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) { @@ -62,7 +67,7 @@ static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) #else static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) { - return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features); + return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT); } #endif @@ -565,12 +570,6 @@ static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu) vcpu_set_flag((v), e); \ } while (0) - -static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature) -{ - return test_bit(feature, vcpu->arch.features); -} - static __always_inline void kvm_write_cptr_el2(u64 val) { if (has_vhe() || has_hvhe()) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index cb2cde7b2682..c3a17888f183 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -574,9 +574,6 @@ struct kvm_vcpu_arch { /* Cache some mmu pages needed inside spinlock regions */ struct 
kvm_mmu_memory_cache mmu_page_cache; - /* feature flags */ - DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); - /* Virtual SError ESR to restore when HCR_EL2.VSE is set */ u64 vsesr_el2; diff --git a/arch/arm64/include/asm/kvm_nested.h b/arch/arm64/include/asm/kvm_nested.h index fa23cc9c2adc..6cec8e9c6c91 100644 --- a/arch/arm64/include/asm/kvm_nested.h +++ b/arch/arm64/include/asm/kvm_nested.h @@ -2,13 +2,14 @@ #ifndef __ARM64_KVM_NESTED_H #define __ARM64_KVM_NESTED_H +#include #include static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu) { return (!__is_defined(__KVM_NVHE_HYPERVISOR__) && cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) && - test_bit(KVM_ARM_VCPU_HAS_EL2, vcpu->arch.features)); + vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2)); } extern bool __check_nv_sr_forward(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index ac30c4f3d55c..1bfdd583b261 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -367,7 +367,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) /* Force users to call KVM_ARM_VCPU_INIT */ vcpu_clear_flag(vcpu, VCPU_INITIALIZED); - bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; @@ -1263,7 +1262,8 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu, { unsigned long features = init->features[0]; - return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); + return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features, + KVM_VCPU_MAX_FEATURES); } static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, @@ -1276,15 +1276,14 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, mutex_lock(&kvm->arch.config_lock); if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) && - !bitmap_equal(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES)) + kvm_vcpu_init_changed(vcpu, init)) goto out_unlock; - bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); + bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); /* Now we know what it is, we can reset it. 
*/ kvm_reset_vcpu(vcpu); - bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); vcpu_set_flag(vcpu, VCPU_INITIALIZED); ret = 0; diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 7fb4df0456de..1b79219c590c 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -554,7 +554,7 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { bool wants_02; - wants_02 = test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features); + wants_02 = vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2); switch (val) { case KVM_ARM_PSCI_0_1: diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c index 96ef9b7e74d4..5bb4de162cab 100644 --- a/arch/arm64/kvm/reset.c +++ b/arch/arm64/kvm/reset.c @@ -208,14 +208,14 @@ void kvm_reset_vcpu(struct kvm_vcpu *vcpu) kvm_arch_vcpu_put(vcpu); if (!kvm_arm_vcpu_sve_finalized(vcpu)) { - if (test_bit(KVM_ARM_VCPU_SVE, vcpu->arch.features)) + if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE)) kvm_vcpu_enable_sve(vcpu); } else { kvm_vcpu_reset_sve(vcpu); } - if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, vcpu->arch.features) || - test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, vcpu->arch.features)) + if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) || + vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC)) kvm_vcpu_enable_ptrauth(vcpu); if (vcpu_el1_is_32bit(vcpu)) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 31029f4f7be8..3546ebc469ad 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -77,7 +77,7 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu); void kvm_vcpu_pmu_resync_el0(void); #define kvm_vcpu_has_pmu(vcpu) \ - (test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features)) + (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PMU_V3)) /* * Updates the vcpu's view of the pmu events for this cpu. diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h index 6e55b9283789..e8fb624013d1 100644 --- a/include/kvm/arm_psci.h +++ b/include/kvm/arm_psci.h @@ -26,7 +26,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu) * revisions. It is thus safe to return the latest, unless * userspace has instructed us otherwise. */ - if (test_bit(KVM_ARM_VCPU_PSCI_0_2, vcpu->arch.features)) { + if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PSCI_0_2)) { if (vcpu->kvm->arch.psci_version) return vcpu->kvm->arch.psci_version; -- cgit v1.2.3 From ec1c3b9ff16082f880b304be40992568f4eee6a7 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 08:01:32 +0000 Subject: arm64: tlbflush: Rename MAX_TLBI_OPS Perhaps unsurprisingly, I-cache invalidations suffer from performance issues similar to TLB invalidations on certain systems. TLB and I-cache maintenance all result in DVM on the mesh, which is where the real bottleneck lies. Rename the heuristic to point the finger at DVM, such that it may be reused for limiting I-cache invalidations. 
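To put a number on the heuristic (assuming 4KiB pages, where PTRS_PER_PTE is 512):

	MAX_DVM_OPS = PTRS_PER_PTE = 512
	flush_tlb_kernel_range() cutoff = 512 * 4KiB = 2MiB

so anything beyond 2MiB of 4KiB pages is punted to flush_tlb_all() rather than invalidated page by page.
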
Reviewed-by: Gavin Shan Tested-by: Gavin Shan Acked-by: Will Deacon Link: https://lore.kernel.org/r/20230920080133.944717-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/include/asm/tlbflush.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index b149cf9f91bc..3431d37e5054 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -333,7 +333,7 @@ static inline void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch) * This is meant to avoid soft lock-ups on large TLB flushing ranges and not * necessarily a performance improvement. */ -#define MAX_TLBI_OPS PTRS_PER_PTE +#define MAX_DVM_OPS PTRS_PER_PTE /* * __flush_tlb_range_op - Perform TLBI operation upon a range @@ -413,12 +413,12 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma, /* * When not uses TLB range ops, we can handle up to - * (MAX_TLBI_OPS - 1) pages; + * (MAX_DVM_OPS - 1) pages; * When uses TLB range ops, we can handle up to * (MAX_TLBI_RANGE_PAGES - 1) pages. */ if ((!system_supports_tlb_range() && - (end - start) >= (MAX_TLBI_OPS * stride)) || + (end - start) >= (MAX_DVM_OPS * stride)) || pages >= MAX_TLBI_RANGE_PAGES) { flush_tlb_mm(vma->vm_mm); return; @@ -451,7 +451,7 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end { unsigned long addr; - if ((end - start) > (MAX_TLBI_OPS * PAGE_SIZE)) { + if ((end - start) > (MAX_DVM_OPS * PAGE_SIZE)) { flush_tlb_all(); return; } -- cgit v1.2.3 From 909b583f81b5bb5a398d4580543f59b908a86ccc Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 20 Sep 2023 08:01:33 +0000 Subject: KVM: arm64: Avoid soft lockups due to I-cache maintenance Gavin reports of soft lockups on his Ampere Altra Max machine when backing KVM guests with hugetlb pages. Upon further investigation, it was found that the system is unable to keep up with parallel I-cache invalidations done by KVM's stage-2 fault handler. This is ultimately an implementation problem. I-cache maintenance instructions are available at EL0, so nothing stops a malicious userspace from hammering a system with CMOs and cause it to fall over. "Fixing" this problem in KVM is nothing more than slapping a bandage over a much deeper problem. Anyway, the kernel already has a heuristic for limiting TLB invalidations to avoid soft lockups. Reuse that logic to limit I-cache CMOs done by KVM to map executable pages on systems without FEAT_DIC. While at it, restructure __invalidate_icache_guest_page() to improve readability and squeeze our new condition into the existing branching structure. 
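The resulting cap scales with the I-cache line size advertised in CTR_EL0. A worked example, assuming the common case of 64-byte I-cache lines (CTR_EL0.IminLine = 4, i.e. log2 of 16 words) and MAX_DVM_OPS = 512:

	iminline  = IminLine + 2 = 6          /* log2 of the line size in bytes */
	max range = MAX_DVM_OPS << 6 = 512 * 64B = 32KiB

beyond which __invalidate_icache_guest_page() falls back to invalidating the whole I-cache to the PoU.
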
Link: https://lore.kernel.org/kvmarm/20230904072826.1468907-1-gshan@redhat.com/ Reviewed-by: Gavin Shan Tested-by: Gavin Shan Link: https://lore.kernel.org/r/20230920080133.944717-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_mmu.h | 37 +++++++++++++++++++++++++++++++------ 1 file changed, 31 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 96a80e8f6226..a425ecdd7be0 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -224,16 +224,41 @@ static inline void __clean_dcache_guest_page(void *va, size_t size) kvm_flush_dcache_to_poc(va, size); } +static inline size_t __invalidate_icache_max_range(void) +{ + u8 iminline; + u64 ctr; + + asm volatile(ALTERNATIVE_CB("movz %0, #0\n" + "movk %0, #0, lsl #16\n" + "movk %0, #0, lsl #32\n" + "movk %0, #0, lsl #48\n", + ARM64_ALWAYS_SYSTEM, + kvm_compute_final_ctr_el0) + : "=r" (ctr)); + + iminline = SYS_FIELD_GET(CTR_EL0, IminLine, ctr) + 2; + return MAX_DVM_OPS << iminline; +} + static inline void __invalidate_icache_guest_page(void *va, size_t size) { - if (icache_is_aliasing()) { - /* any kind of VIPT cache */ + /* + * VPIPT I-cache maintenance must be done from EL2. See comment in the + * nVHE flavor of __kvm_tlb_flush_vmid_ipa(). + */ + if (icache_is_vpipt() && read_sysreg(CurrentEL) != CurrentEL_EL2) + return; + + /* + * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the + * invalidation range exceeds our arbitrary limit on invadations by + * cache line. + */ + if (icache_is_aliasing() || size > __invalidate_icache_max_range()) icache_inval_all_pou(); - } else if (read_sysreg(CurrentEL) != CurrentEL_EL1 || - !icache_is_vpipt()) { - /* PIPT or VPIPT at EL2 (see comment in __kvm_tlb_flush_vmid_ipa) */ + else icache_inval_pou((unsigned long)va, (unsigned long)va + size); - } } void kvm_set_way_flush(struct kvm_vcpu *vcpu); -- cgit v1.2.3 From c04bf723ccd639250fa680a0fb4e1ac15bb84c3b Mon Sep 17 00:00:00 2001 From: Vincent Donnefort Date: Thu, 28 Sep 2023 18:32:03 +0100 Subject: KVM: arm64: Do not transfer page refcount for THP adjustment GUP affects a refcount common to all pages forming the THP. There is therefore no need to move the refcount from a tail to the head page. Under the hood it decrements and increments the same counter. Signed-off-by: Vincent Donnefort Reviewed-by: Gavin Shan Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230928173205.2826598-2-vdonnefort@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/mmu.c | 20 -------------------- 1 file changed, 20 deletions(-) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 587a104f66c3..de5e5148ef5d 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1295,28 +1295,8 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, if (sz < PMD_SIZE) return PAGE_SIZE; - /* - * The address we faulted on is backed by a transparent huge - * page. However, because we map the compound huge page and - * not the individual tail page, we need to transfer the - * refcount to the head page. We have to be careful that the - * THP doesn't start to split while we are adjusting the - * refcounts. - * - * We are sure this doesn't happen, because mmu_invalidate_retry - * was successful and we are holding the mmu_lock, so if this - * THP is trying to split, it will be blocked in the mmu - * notifier before touching any of the pages, specifically - * before being able to call __split_huge_page_refcount(). 
- * - * We can therefore safely transfer the refcount from PG_tail - * to PG_head and switch the pfn from a tail page to the head - * page accordingly. - */ *ipap &= PMD_MASK; - kvm_release_pfn_clean(pfn); pfn &= ~(PTRS_PER_PMD - 1); - get_page(pfn_to_page(pfn)); *pfnp = pfn; return PMD_SIZE; -- cgit v1.2.3 From 9a0a75d3ccee20149587ab740a2dee31ba401ada Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:01 +0100 Subject: KVM: arm64: vgic: Make kvm_vgic_inject_irq() take a vcpu pointer Passing a vcpu_id to kvm_vgic_inject_irq() is silly for two reasons: - we often confuse vcpu_id and vcpu_idx - we eventually have to convert it back to a vcpu - we can't count Instead, pass a vcpu pointer, which is unambiguous. A NULL vcpu is also allowed for interrupts that are not private to a vcpu (such as SPIs). Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-2-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/arch_timer.c | 2 +- arch/arm64/kvm/arm.c | 23 ++++++++--------------- arch/arm64/kvm/pmu-emul.c | 2 +- arch/arm64/kvm/vgic/vgic-irqfd.c | 2 +- arch/arm64/kvm/vgic/vgic.c | 12 +++++------- include/kvm/arm_vgic.h | 4 ++-- 6 files changed, 18 insertions(+), 27 deletions(-) diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 6dcdae4d38cb..1f828f3b854c 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -458,7 +458,7 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, timer_ctx->irq.level); if (!userspace_irqchip(vcpu->kvm)) { - ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, + ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu, timer_irq(timer_ctx), timer_ctx->irq.level, timer_ctx); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 4866b3f7b4ea..fa21fb15e927 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1134,27 +1134,23 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, bool line_status) { u32 irq = irq_level->irq; - unsigned int irq_type, vcpu_idx, irq_num; - int nrcpus = atomic_read(&kvm->online_vcpus); + unsigned int irq_type, vcpu_id, irq_num; struct kvm_vcpu *vcpu = NULL; bool level = irq_level->level; irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; - vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; - vcpu_idx += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1); + vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; + vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1); irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; - trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); + trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level); switch (irq_type) { case KVM_ARM_IRQ_TYPE_CPU: if (irqchip_in_kernel(kvm)) return -ENXIO; - if (vcpu_idx >= nrcpus) - return -EINVAL; - - vcpu = kvm_get_vcpu(kvm, vcpu_idx); + vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id); if (!vcpu) return -EINVAL; @@ -1166,17 +1162,14 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, if (!irqchip_in_kernel(kvm)) return -ENXIO; - if (vcpu_idx >= nrcpus) - return -EINVAL; - - vcpu = kvm_get_vcpu(kvm, vcpu_idx); + vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id); if (!vcpu) return -EINVAL; if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS) return -EINVAL; - return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, 
NULL); + return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL); case KVM_ARM_IRQ_TYPE_SPI: if (!irqchip_in_kernel(kvm)) return -ENXIO; @@ -1184,7 +1177,7 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, if (irq_num < VGIC_NR_PRIVATE_IRQS) return -EINVAL; - return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL); + return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL); } return -EINVAL; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 6b066e04dc5d..3afb281ed8d2 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -348,7 +348,7 @@ static void kvm_pmu_update_state(struct kvm_vcpu *vcpu) pmu->irq_level = overflow; if (likely(irqchip_in_kernel(vcpu->kvm))) { - int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, + int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu, pmu->irq_num, overflow, pmu); WARN_ON(ret); } diff --git a/arch/arm64/kvm/vgic/vgic-irqfd.c b/arch/arm64/kvm/vgic/vgic-irqfd.c index 475059bacedf..8c711deb25aa 100644 --- a/arch/arm64/kvm/vgic/vgic-irqfd.c +++ b/arch/arm64/kvm/vgic/vgic-irqfd.c @@ -23,7 +23,7 @@ static int vgic_irqfd_set_irq(struct kvm_kernel_irq_routing_entry *e, if (!vgic_valid_spi(kvm, spi_id)) return -EINVAL; - return kvm_vgic_inject_irq(kvm, 0, spi_id, level, NULL); + return kvm_vgic_inject_irq(kvm, NULL, spi_id, level, NULL); } /** diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c index 8be4c1ebdec2..db2a95762b1b 100644 --- a/arch/arm64/kvm/vgic/vgic.c +++ b/arch/arm64/kvm/vgic/vgic.c @@ -422,7 +422,7 @@ retry: /** * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic * @kvm: The VM structure pointer - * @cpuid: The CPU for PPIs + * @vcpu: The CPU for PPIs or NULL for global interrupts * @intid: The INTID to inject a new state to. * @level: Edge-triggered: true: to trigger the interrupt * false: to ignore the call @@ -436,24 +436,22 @@ retry: * level-sensitive interrupts. You can think of the level parameter as 1 * being HIGH and 0 being LOW and all devices being active-HIGH. */ -int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, - bool level, void *owner) +int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, + unsigned int intid, bool level, void *owner) { - struct kvm_vcpu *vcpu; struct vgic_irq *irq; unsigned long flags; int ret; - trace_vgic_update_irq_pending(cpuid, intid, level); - ret = vgic_lazy_init(kvm); if (ret) return ret; - vcpu = kvm_get_vcpu(kvm, cpuid); if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS) return -EINVAL; + trace_vgic_update_irq_pending(vcpu ? 
vcpu->vcpu_idx : 0, intid, level); + irq = vgic_get_irq(kvm, vcpu, intid); if (!irq) return -EINVAL; diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index 5b27f94d4fad..8cc38e836f54 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -375,8 +375,8 @@ int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); void kvm_vgic_init_cpu_hardware(void); -int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, - bool level, void *owner); +int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, + unsigned int intid, bool level, void *owner); int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid, struct irq_ops *ops); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); -- cgit v1.2.3 From d455d366c451e68781122c693e2e357c673ee807 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:02 +0100 Subject: KVM: arm64: vgic-its: Treat the collection target address as a vcpu_id Since our emulated ITS advertises GITS_TYPER.PTA=0, the target address associated with a collection is a PE number and not an address. So far, so good. However, the PE number is what userspace has given us (aka the vcpu_id), and not the internal vcpu index. Make sure we consistently retrieve the vcpu by ID rather than by index, adding a helper that deals with most of the cases. We also get rid of the pointless (and bogus) comparisons to online_vcpus, which don't really make sense. Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-3-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/vgic/vgic-its.c | 49 +++++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 22 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-its.c b/arch/arm64/kvm/vgic/vgic-its.c index 5fe2365a629f..2dad2d095160 100644 --- a/arch/arm64/kvm/vgic/vgic-its.c +++ b/arch/arm64/kvm/vgic/vgic-its.c @@ -378,6 +378,12 @@ static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu) return ret; } +static struct kvm_vcpu *collection_to_vcpu(struct kvm *kvm, + struct its_collection *col) +{ + return kvm_get_vcpu_by_id(kvm, col->target_addr); +} + /* * Promotes the ITS view of affinity of an ITTE (which redistributor this LPI * is targeting) to the VGIC's view, which deals with target VCPUs. 
@@ -391,7 +397,7 @@ static void update_affinity_ite(struct kvm *kvm, struct its_ite *ite) if (!its_is_collection_mapped(ite->collection)) return; - vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr); + vcpu = collection_to_vcpu(kvm, ite->collection); update_affinity(ite->irq, vcpu); } @@ -679,7 +685,7 @@ int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its, if (!ite || !its_is_collection_mapped(ite->collection)) return E_ITS_INT_UNMAPPED_INTERRUPT; - vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr); + vcpu = collection_to_vcpu(kvm, ite->collection); if (!vcpu) return E_ITS_INT_UNMAPPED_INTERRUPT; @@ -887,7 +893,7 @@ static int vgic_its_cmd_handle_movi(struct kvm *kvm, struct vgic_its *its, return E_ITS_MOVI_UNMAPPED_COLLECTION; ite->collection = collection; - vcpu = kvm_get_vcpu(kvm, collection->target_addr); + vcpu = collection_to_vcpu(kvm, collection); vgic_its_invalidate_cache(kvm); @@ -1121,7 +1127,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, } if (its_is_collection_mapped(collection)) - vcpu = kvm_get_vcpu(kvm, collection->target_addr); + vcpu = collection_to_vcpu(kvm, collection); irq = vgic_add_lpi(kvm, lpi_nr, vcpu); if (IS_ERR(irq)) { @@ -1242,21 +1248,22 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its, u64 *its_cmd) { u16 coll_id; - u32 target_addr; struct its_collection *collection; bool valid; valid = its_cmd_get_validbit(its_cmd); coll_id = its_cmd_get_collection(its_cmd); - target_addr = its_cmd_get_target_addr(its_cmd); - - if (target_addr >= atomic_read(&kvm->online_vcpus)) - return E_ITS_MAPC_PROCNUM_OOR; if (!valid) { vgic_its_free_collection(its, coll_id); vgic_its_invalidate_cache(kvm); } else { + struct kvm_vcpu *vcpu; + + vcpu = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd)); + if (!vcpu) + return E_ITS_MAPC_PROCNUM_OOR; + collection = find_collection(its, coll_id); if (!collection) { @@ -1270,9 +1277,9 @@ static int vgic_its_cmd_handle_mapc(struct kvm *kvm, struct vgic_its *its, coll_id); if (ret) return ret; - collection->target_addr = target_addr; + collection->target_addr = vcpu->vcpu_id; } else { - collection->target_addr = target_addr; + collection->target_addr = vcpu->vcpu_id; update_affinity_collection(kvm, its, collection); } } @@ -1382,7 +1389,7 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its, if (!its_is_collection_mapped(collection)) return E_ITS_INVALL_UNMAPPED_COLLECTION; - vcpu = kvm_get_vcpu(kvm, collection->target_addr); + vcpu = collection_to_vcpu(kvm, collection); vgic_its_invall(vcpu); return 0; @@ -1399,23 +1406,21 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its, static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its, u64 *its_cmd) { - u32 target1_addr = its_cmd_get_target_addr(its_cmd); - u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32); struct kvm_vcpu *vcpu1, *vcpu2; struct vgic_irq *irq; u32 *intids; int irq_count, i; - if (target1_addr >= atomic_read(&kvm->online_vcpus) || - target2_addr >= atomic_read(&kvm->online_vcpus)) + /* We advertise GITS_TYPER.PTA==0, making the address the vcpu ID */ + vcpu1 = kvm_get_vcpu_by_id(kvm, its_cmd_get_target_addr(its_cmd)); + vcpu2 = kvm_get_vcpu_by_id(kvm, its_cmd_mask_field(its_cmd, 3, 16, 32)); + + if (!vcpu1 || !vcpu2) return E_ITS_MOVALL_PROCNUM_OOR; - if (target1_addr == target2_addr) + if (vcpu1 == vcpu2) return 0; - vcpu1 = kvm_get_vcpu(kvm, target1_addr); - vcpu2 = kvm_get_vcpu(kvm, target2_addr); - irq_count = 
vgic_copy_lpi_list(kvm, vcpu1, &intids); if (irq_count < 0) return irq_count; @@ -2258,7 +2263,7 @@ static int vgic_its_restore_ite(struct vgic_its *its, u32 event_id, return PTR_ERR(ite); if (its_is_collection_mapped(collection)) - vcpu = kvm_get_vcpu(kvm, collection->target_addr); + vcpu = kvm_get_vcpu_by_id(kvm, collection->target_addr); irq = vgic_add_lpi(kvm, lpi_id, vcpu); if (IS_ERR(irq)) { @@ -2573,7 +2578,7 @@ static int vgic_its_restore_cte(struct vgic_its *its, gpa_t gpa, int esz) coll_id = val & KVM_ITS_CTE_ICID_MASK; if (target_addr != COLLECTION_NOT_MAPPED && - target_addr >= atomic_read(&kvm->online_vcpus)) + !kvm_get_vcpu_by_id(kvm, target_addr)) return -EINVAL; collection = find_collection(its, coll_id); -- cgit v1.2.3 From f3f60a56539108dacfd87b6514398b3970daec54 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:03 +0100 Subject: KVM: arm64: vgic-v3: Refactor GICv3 SGI generation As we're about to change the way SGIs are sent, start by splitting out some of the basic functionality: instead of intermingling the broadcast and non-broadcast cases with the actual SGI generation, perform the following cleanups: - move the SGI queuing into its own helper - split the broadcast code from the affinity-driven code - replace the mask/shift combinations with FIELD_GET() - fix the confusion between vcpu_id and vcpu when handling the broadcast case The result is much more readable, and paves the way for further optimisations. Tested-by: Joey Gouly Tested-by: Shameer Kolothum Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-4-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 110 ++++++++++++++++++++----------------- 1 file changed, 59 insertions(+), 51 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 188d2187eede..88b8d4524854 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -1052,6 +1052,38 @@ static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu) ((((reg) & ICC_SGI1R_AFFINITY_## level ##_MASK) \ >> ICC_SGI1R_AFFINITY_## level ##_SHIFT) << MPIDR_LEVEL_SHIFT(level)) +static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1) +{ + struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, sgi); + unsigned long flags; + + raw_spin_lock_irqsave(&irq->irq_lock, flags); + + /* + * An access targeting Group0 SGIs can only generate + * those, while an access targeting Group1 SGIs can + * generate interrupts of either group. + */ + if (!irq->group || allow_group1) { + if (!irq->hw) { + irq->pending_latch = true; + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); + } else { + /* HW SGI? 
Ask the GIC to inject it */ + int err; + err = irq_set_irqchip_state(irq->host_irq, + IRQCHIP_STATE_PENDING, + true); + WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + } + } else { + raw_spin_unlock_irqrestore(&irq->irq_lock, flags); + } + + vgic_put_irq(vcpu->kvm, irq); +} + /** * vgic_v3_dispatch_sgi - handle SGI requests from VCPUs * @vcpu: The VCPU requesting a SGI @@ -1070,19 +1102,30 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) { struct kvm *kvm = vcpu->kvm; struct kvm_vcpu *c_vcpu; - u16 target_cpus; + unsigned long target_cpus; u64 mpidr; - int sgi; - int vcpu_id = vcpu->vcpu_id; - bool broadcast; - unsigned long c, flags; - - sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; - broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); - target_cpus = (reg & ICC_SGI1R_TARGET_LIST_MASK) >> ICC_SGI1R_TARGET_LIST_SHIFT; + u32 sgi; + unsigned long c; + + sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg); + + /* Broadcast */ + if (unlikely(reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT))) { + kvm_for_each_vcpu(c, c_vcpu, kvm) { + /* Don't signal the calling VCPU */ + if (c_vcpu == vcpu) + continue; + + vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1); + } + + return; + } + mpidr = SGI_AFFINITY_LEVEL(reg, 3); mpidr |= SGI_AFFINITY_LEVEL(reg, 2); mpidr |= SGI_AFFINITY_LEVEL(reg, 1); + target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg); /* * We iterate over all VCPUs to find the MPIDRs matching the request. @@ -1091,54 +1134,19 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) * VCPUs when most of the times we just signal a single VCPU. */ kvm_for_each_vcpu(c, c_vcpu, kvm) { - struct vgic_irq *irq; + int level0; /* Exit early if we have dealt with all requested CPUs */ - if (!broadcast && target_cpus == 0) + if (target_cpus == 0) break; - - /* Don't signal the calling VCPU */ - if (broadcast && c == vcpu_id) + level0 = match_mpidr(mpidr, target_cpus, c_vcpu); + if (level0 == -1) continue; - if (!broadcast) { - int level0; - - level0 = match_mpidr(mpidr, target_cpus, c_vcpu); - if (level0 == -1) - continue; - - /* remove this matching VCPU from the mask */ - target_cpus &= ~BIT(level0); - } + /* remove this matching VCPU from the mask */ + target_cpus &= ~BIT(level0); - irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); - - raw_spin_lock_irqsave(&irq->irq_lock, flags); - - /* - * An access targeting Group0 SGIs can only generate - * those, while an access targeting Group1 SGIs can - * generate interrupts of either group. - */ - if (!irq->group || allow_group1) { - if (!irq->hw) { - irq->pending_latch = true; - vgic_queue_irq_unlock(vcpu->kvm, irq, flags); - } else { - /* HW SGI? Ask the GIC to inject it */ - int err; - err = irq_set_irqchip_state(irq->host_irq, - IRQCHIP_STATE_PENDING, - true); - WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); - raw_spin_unlock_irqrestore(&irq->irq_lock, flags); - } - } else { - raw_spin_unlock_irqrestore(&irq->irq_lock, flags); - } - - vgic_put_irq(vcpu->kvm, irq); + vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1); } } -- cgit v1.2.3 From 4e7728c81a54b17bd33be402ac140bc11bb0c4f4 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:04 +0100 Subject: KVM: arm64: vgic-v2: Use cpuid from userspace as vcpu_id When parsing a GICv2 attribute that contains a cpuid, handle this as the vcpu_id, not a vcpu_idx, as userspace cannot really know the mapping between the two. For this, use kvm_get_vcpu_by_id() instead of kvm_get_vcpu(). 
Take this opportunity to get rid of the pointless check against online_vcpus, which doesn't make much sense either, and switch to FIELD_GET as a way to extract the vcpu_id. Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-5-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/vgic/vgic-kvm-device.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 212b73a715c1..c11962f901e0 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -339,13 +339,9 @@ int vgic_v2_parse_attr(struct kvm_device *dev, struct kvm_device_attr *attr, { int cpuid; - cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >> - KVM_DEV_ARM_VGIC_CPUID_SHIFT; + cpuid = FIELD_GET(KVM_DEV_ARM_VGIC_CPUID_MASK, attr->attr); - if (cpuid >= atomic_read(&dev->kvm->online_vcpus)) - return -EINVAL; - - reg_attr->vcpu = kvm_get_vcpu(dev->kvm, cpuid); + reg_attr->vcpu = kvm_get_vcpu_by_id(dev->kvm, cpuid); reg_attr->addr = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK; return 0; -- cgit v1.2.3 From ac0fe56d46c0d534dddcf97d132c222722b9b7a5 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:05 +0100 Subject: KVM: arm64: vgic: Use vcpu_idx for the debug information When dumping the debug information, use vcpu_idx instead of vcpu_id, as this is independent of any userspace influence. Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-6-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/vgic/vgic-debug.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-debug.c b/arch/arm64/kvm/vgic/vgic-debug.c index 07aa0437125a..85606a531dc3 100644 --- a/arch/arm64/kvm/vgic/vgic-debug.c +++ b/arch/arm64/kvm/vgic/vgic-debug.c @@ -166,7 +166,7 @@ static void print_header(struct seq_file *s, struct vgic_irq *irq, if (vcpu) { hdr = "VCPU"; - id = vcpu->vcpu_id; + id = vcpu->vcpu_idx; } seq_printf(s, "\n"); @@ -212,7 +212,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq, " %2d " "\n", type, irq->intid, - (irq->target_vcpu) ? irq->target_vcpu->vcpu_id : -1, + (irq->target_vcpu) ? irq->target_vcpu->vcpu_idx : -1, pending, irq->line_level, irq->active, @@ -224,7 +224,7 @@ static void print_irq_state(struct seq_file *s, struct vgic_irq *irq, irq->mpidr, irq->source, irq->priority, - (irq->vcpu) ? irq->vcpu->vcpu_id : -1); + (irq->vcpu) ? irq->vcpu->vcpu_idx : -1); } static int vgic_debug_show(struct seq_file *s, void *v) -- cgit v1.2.3 From 5f4bd815ec718992c09de1f39479ae90dcbb7df3 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:06 +0100 Subject: KVM: arm64: Use vcpu_idx for invalidation tracking While vcpu_id isn't necessarily a bad choice as an identifier for the currently running vcpu, it is provided by userspace, and there is close to no guarantee that it would be unique. Switch it to vcpu_idx instead, for which we have much stronger guarantees. 
Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-7-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index fa21fb15e927..9379a1227501 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -438,9 +438,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) * We might get preempted before the vCPU actually runs, but * over-invalidation doesn't affect correctness. */ - if (*last_ran != vcpu->vcpu_id) { + if (*last_ran != vcpu->vcpu_idx) { kvm_call_hyp(__kvm_flush_cpu_context, mmu); - *last_ran = vcpu->vcpu_id; + *last_ran = vcpu->vcpu_idx; } vcpu->cpu = cpu; -- cgit v1.2.3 From 0a2acd38d23b8865b3a5a8c851001350df76fc09 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:07 +0100 Subject: KVM: arm64: Simplify kvm_vcpu_get_mpidr_aff() By definition, MPIDR_EL1 cannot be modified by the guest. This means it is pointless to check whether this is loaded on the CPU. Simplify the kvm_vcpu_get_mpidr_aff() helper to directly access the in-memory value. Reviewed-by: Joey Gouly Reviewed-by: Zenghui Yu Tested-by: Joey Gouly Tested-by: Shameer Kolothum Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-8-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_emulate.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 3d6725ff0bf6..b66ef77cf49e 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -465,7 +465,7 @@ static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) { - return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK; + return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK; } static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From 5544750efd51143fee9e9184d69518c0c05426a1 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:08 +0100 Subject: KVM: arm64: Build MPIDR to vcpu index cache at runtime The MPIDR_EL1 register contains a unique value that identifies the CPU. The only problem with it is that it is stupidly large (32 bits, once the useless stuff is removed). Trying to obtain a vcpu from an MPIDR value is a fairly common, yet costly operation: we iterate over all the vcpus until we find the correct one. While this is cheap for small VMs, it is pretty expensive on large ones, especially if you are trying to get to the one that's at the end of the list... In order to help with this, it is important to realise that the MPIDR values are actually structured, and that implementations tend to use a small number of significant bits in the 32bit space. We can use this fact to our advantage by computing a small hash table that uses the "compression" of the significant MPIDR bits as an index, giving us the vcpu index as a result. Given that the MPIDR values can be supplied by userspace, and that an evil VMM could decide to make *all* bits significant, resulting in a 4G-entry table, we only use this method if the resulting table fits in a single page. Otherwise, we fall back to the good old iterative method. Nothing uses that table just yet, but keep your eyes peeled. 
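A worked example of the compression (assuming a VM of eight vcpus arranged as two clusters of four, so only Aff0 bits [1:0] and Aff1 bit [8] ever vary across the MPIDR values):

	mask       = aff_set ^ aff_clr = 0x103    /* the three varying bits */
	nr_entries = 2^hweight(mask)   = 8        /* comfortably fits in a page */

	/* vcpu with Aff1 = 1, Aff0 = 3, i.e. aff = 0x103: */
	bit 0 -> index bit 0, bit 1 -> index bit 1, bit 8 -> index bit 2
	index = 0b111 = 7

which is exactly the BEXT/PEXT-style extraction performed by kvm_mpidr_index() in the hunk below.
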
Reviewed-by: Joey Gouly Reviewed-by: Zenghui Yu Tested-by: Joey Gouly Tested-by: Shameer Kolothum Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-9-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 28 ++++++++++++++++++++ arch/arm64/kvm/arm.c | 54 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index af06ccb7ee34..4b5a57a60b6a 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -202,6 +202,31 @@ struct kvm_protected_vm { struct kvm_hyp_memcache teardown_mc; }; +struct kvm_mpidr_data { + u64 mpidr_mask; + DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx); +}; + +static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr) +{ + unsigned long mask = data->mpidr_mask; + u64 aff = mpidr & MPIDR_HWID_BITMASK; + int nbits, bit, bit_idx = 0; + u16 index = 0; + + /* + * If this looks like RISC-V's BEXT or x86's PEXT + * instructions, it isn't by accident. + */ + nbits = fls(mask); + for_each_set_bit(bit, &mask, nbits) { + index |= (aff & BIT(bit)) >> (bit - bit_idx); + bit_idx++; + } + + return index; +} + struct kvm_arch { struct kvm_s2_mmu mmu; @@ -248,6 +273,9 @@ struct kvm_arch { /* VM-wide vCPU feature set */ DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES); + /* MPIDR to vcpu index mapping, optional */ + struct kvm_mpidr_data *mpidr_data; + /* * VM-wide PMU filter, implemented as a bitmap and big enough for * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+). diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 9379a1227501..b02e28f76083 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -205,6 +205,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) if (is_protected_kvm_enabled()) pkvm_destroy_hyp_vm(kvm); + kfree(kvm->arch.mpidr_data); kvm_destroy_vcpus(kvm); kvm_unshare_hyp(kvm, kvm + 1); @@ -578,6 +579,57 @@ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu) return vcpu_get_flag(vcpu, VCPU_INITIALIZED); } +static void kvm_init_mpidr_data(struct kvm *kvm) +{ + struct kvm_mpidr_data *data = NULL; + unsigned long c, mask, nr_entries; + u64 aff_set = 0, aff_clr = ~0UL; + struct kvm_vcpu *vcpu; + + mutex_lock(&kvm->arch.config_lock); + + if (kvm->arch.mpidr_data || atomic_read(&kvm->online_vcpus) == 1) + goto out; + + kvm_for_each_vcpu(c, vcpu, kvm) { + u64 aff = kvm_vcpu_get_mpidr_aff(vcpu); + aff_set |= aff; + aff_clr &= aff; + } + + /* + * A significant bit can be either 0 or 1, and will only appear in + * aff_set. Use aff_clr to weed out the useless stuff. + */ + mask = aff_set ^ aff_clr; + nr_entries = BIT_ULL(hweight_long(mask)); + + /* + * Don't let userspace fool us. If we need more than a single page + * to describe the compressed MPIDR array, just fall back to the + * iterative method. Single vcpu VMs do not need this either. 
+ */ + if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE) + data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries), + GFP_KERNEL_ACCOUNT); + + if (!data) + goto out; + + data->mpidr_mask = mask; + + kvm_for_each_vcpu(c, vcpu, kvm) { + u64 aff = kvm_vcpu_get_mpidr_aff(vcpu); + u16 index = kvm_mpidr_index(data, aff); + + data->cmpidr_to_idx[index] = c; + } + + kvm->arch.mpidr_data = data; +out: + mutex_unlock(&kvm->arch.config_lock); +} + /* * Handle both the initialisation that is being done when the vcpu is * run for the first time, as well as the updates that must be @@ -601,6 +653,8 @@ int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu) if (likely(vcpu_has_run_once(vcpu))) return 0; + kvm_init_mpidr_data(kvm); + kvm_arm_vcpu_init_debug(vcpu); if (likely(irqchip_in_kernel(kvm))) { -- cgit v1.2.3 From 54a8006d0b49044d0cb682119686a45de906fe3c Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:09 +0100 Subject: KVM: arm64: Fast-track kvm_mpidr_to_vcpu() when mpidr_data is available If our fancy little table is present when calling kvm_mpidr_to_vcpu(), use it to recover the corresponding vcpu. Reviewed-by: Joey Gouly Reviewed-by: Zenghui Yu Tested-by: Joey Gouly Tested-by: Shameer Kolothum Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-10-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index b02e28f76083..e3d667feecaf 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -2388,6 +2388,18 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) unsigned long i; mpidr &= MPIDR_HWID_BITMASK; + + if (kvm->arch.mpidr_data) { + u16 idx = kvm_mpidr_index(kvm->arch.mpidr_data, mpidr); + + vcpu = kvm_get_vcpu(kvm, + kvm->arch.mpidr_data->cmpidr_to_idx[idx]); + if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu)) + vcpu = NULL; + + return vcpu; + } + kvm_for_each_vcpu(i, vcpu, kvm) { if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) return vcpu; -- cgit v1.2.3 From b5daffb120bb60f974ae1a5589160b05c98e00e5 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:10 +0100 Subject: KVM: arm64: vgic-v3: Optimize affinity-based SGI injection Our affinity-based SGI injection code is a bit daft. We iterate over all the CPUs trying to match the set of affinities that the guest is trying to reach, leading to some very bad behaviours if the selected targets are at a high vcpu index. Instead, we can now use the fact that we have an optimised MPIDR to vcpu mapping, and only look at the relevant values. This results in a much faster injection for large VMs, and in a near constant time, irrespective of the position in the vcpu index space. As a bonus, this is mostly deleting a lot of hard-to-read code. Nobody will complain about that. 
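A rough userspace model of the loop this patch moves to may help: given the upper affinity levels and a 16-bit Aff0 target list, the handler visits only the set bits and performs a constant-time lookup for each. The toy lookup table and helper names below are assumptions for illustration, not the kernel implementation.

#include <stdint.h>
#include <stdio.h>

#define TARGET_LIST_BITS 16

/* Hypothetical stand-in for kvm_mpidr_to_vcpu(): O(1) table lookup */
static int mpidr_to_vcpu_idx(uint64_t mpidr)
{
	/* Toy mapping: four vcpus whose MPIDR equals their index */
	return (mpidr < 4) ? (int)mpidr : -1;
}

/* Visit each Aff0 bit in the target list instead of scanning all vcpus */
static void dispatch_sgi(uint64_t upper_aff, uint16_t target_list)
{
	for (int aff0 = 0; aff0 < TARGET_LIST_BITS; aff0++) {
		if (!(target_list & (1U << aff0)))
			continue;

		int idx = mpidr_to_vcpu_idx(upper_aff | aff0);
		if (idx >= 0)
			printf("queue SGI on vcpu %d\n", idx);
	}
}

int main(void)
{
	/* Aff{3,2,1} == 0 in this toy example; target Aff0 CPUs 1 and 3 */
	dispatch_sgi(0, (1U << 1) | (1U << 3));
	return 0;
}

The cost is now bounded by the 16-bit target list rather than by the number of vcpus, which is the "near constant time" claim above.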
Suggested-by: Xu Zhao Tested-by: Joey Gouly Tested-by: Shameer Kolothum Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-11-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/vgic/vgic-mmio-v3.c | 64 +++++++------------------------------- 1 file changed, 11 insertions(+), 53 deletions(-) diff --git a/arch/arm64/kvm/vgic/vgic-mmio-v3.c b/arch/arm64/kvm/vgic/vgic-mmio-v3.c index 88b8d4524854..89117ba2528a 100644 --- a/arch/arm64/kvm/vgic/vgic-mmio-v3.c +++ b/arch/arm64/kvm/vgic/vgic-mmio-v3.c @@ -1013,35 +1013,6 @@ int vgic_v3_has_attr_regs(struct kvm_device *dev, struct kvm_device_attr *attr) return 0; } -/* - * Compare a given affinity (level 1-3 and a level 0 mask, from the SGI - * generation register ICC_SGI1R_EL1) with a given VCPU. - * If the VCPU's MPIDR matches, return the level0 affinity, otherwise - * return -1. - */ -static int match_mpidr(u64 sgi_aff, u16 sgi_cpu_mask, struct kvm_vcpu *vcpu) -{ - unsigned long affinity; - int level0; - - /* - * Split the current VCPU's MPIDR into affinity level 0 and the - * rest as this is what we have to compare against. - */ - affinity = kvm_vcpu_get_mpidr_aff(vcpu); - level0 = MPIDR_AFFINITY_LEVEL(affinity, 0); - affinity &= ~MPIDR_LEVEL_MASK; - - /* bail out if the upper three levels don't match */ - if (sgi_aff != affinity) - return -1; - - /* Is this VCPU's bit set in the mask ? */ - if (!(sgi_cpu_mask & BIT(level0))) - return -1; - - return level0; -} /* * The ICC_SGI* registers encode the affinity differently from the MPIDR, @@ -1094,9 +1065,11 @@ static void vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, u32 sgi, bool allow_group1) * This will trap in sys_regs.c and call this function. * This ICC_SGI1R_EL1 register contains the upper three affinity levels of the * target processors as well as a bitmask of 16 Aff0 CPUs. - * If the interrupt routing mode bit is not set, we iterate over all VCPUs to - * check for matching ones. If this bit is set, we signal all, but not the - * calling VCPU. + * + * If the interrupt routing mode bit is not set, we iterate over the Aff0 + * bits and signal the VCPUs matching the provided Aff{3,2,1}. + * + * If this bit is set, we signal all, but not the calling VCPU. */ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) { @@ -1104,7 +1077,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) struct kvm_vcpu *c_vcpu; unsigned long target_cpus; u64 mpidr; - u32 sgi; + u32 sgi, aff0; unsigned long c; sgi = FIELD_GET(ICC_SGI1R_SGI_ID_MASK, reg); @@ -1122,31 +1095,16 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1) return; } + /* We iterate over affinities to find the corresponding vcpus */ mpidr = SGI_AFFINITY_LEVEL(reg, 3); mpidr |= SGI_AFFINITY_LEVEL(reg, 2); mpidr |= SGI_AFFINITY_LEVEL(reg, 1); target_cpus = FIELD_GET(ICC_SGI1R_TARGET_LIST_MASK, reg); - /* - * We iterate over all VCPUs to find the MPIDRs matching the request. - * If we have handled one CPU, we clear its bit to detect early - * if we are already finished. This avoids iterating through all - * VCPUs when most of the times we just signal a single VCPU. 
- */ - kvm_for_each_vcpu(c, c_vcpu, kvm) { - int level0; - - /* Exit early if we have dealt with all requested CPUs */ - if (target_cpus == 0) - break; - level0 = match_mpidr(mpidr, target_cpus, c_vcpu); - if (level0 == -1) - continue; - - /* remove this matching VCPU from the mask */ - target_cpus &= ~BIT(level0); - - vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1); + for_each_set_bit(aff0, &target_cpus, hweight_long(ICC_SGI1R_TARGET_LIST_MASK)) { + c_vcpu = kvm_mpidr_to_vcpu(kvm, mpidr | aff0); + if (c_vcpu) + vgic_v3_queue_sgi(c_vcpu, sgi, allow_group1); } } -- cgit v1.2.3 From f9940416f193d148dd7ad7498ce4f40c83d12e7a Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 27 Sep 2023 10:09:11 +0100 Subject: KVM: arm64: Clarify the ordering requirements for vcpu/RD creation It goes without saying, but it is probably better to spell it out: If userspace tries to restore a VM, but creates vcpus and/or RDs in a different order, the vcpu/RD mapping will be different. Yes, our API is an ugly piece of crap and I can't believe that we missed this. If we want to relax the above, we'll need to define a new userspace API that allows the mapping to be specified, rather than relying on the kernel to perform the mapping on its own. Reviewed-by: Zenghui Yu Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230927090911.3355209-12-maz@kernel.org Signed-off-by: Oliver Upton --- Documentation/virt/kvm/devices/arm-vgic-v3.rst | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Documentation/virt/kvm/devices/arm-vgic-v3.rst b/Documentation/virt/kvm/devices/arm-vgic-v3.rst index 51e5e5762571..5817edb4e046 100644 --- a/Documentation/virt/kvm/devices/arm-vgic-v3.rst +++ b/Documentation/virt/kvm/devices/arm-vgic-v3.rst @@ -59,6 +59,13 @@ Groups: It is invalid to mix calls with KVM_VGIC_V3_ADDR_TYPE_REDIST and KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attributes. + Note that to obtain reproducible results (the same VCPU being associated + with the same redistributor across a save/restore operation), VCPU creation + order, redistributor region creation order as well as the respective + interleaves of VCPU and region creation MUST be preserved. Any change in + either ordering may result in a different vcpu_id/redistributor association, + resulting in a VM that will fail to run at restore time. + Errors: ======= ============================================================= -- cgit v1.2.3 From 3f9cd0ca848413fd368278310d2cdd6c2bef48b2 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 3 Oct 2023 23:03:57 +0000 Subject: KVM: arm64: Allow userspace to get the writable masks for feature ID registers While the Feature ID range is well defined and pretty large, it isn't inconceivable that the architecture will eventually grow some other ranges that will need to similarly be described to userspace. Add a VM ioctl to allow userspace to get writable masks for feature ID registers in the following system register space: op0 = 3, op1 = {0, 1, 3}, CRn = 0, CRm = {0 - 7}, op2 = {0 - 7} This is used to support mix-and-match userspace and kernels for writable ID registers, where userspace may want to know upfront whether it can actually tweak the contents of an idreg or not. Add a new capability (KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES) that returns a bitmap of the valid ranges, which can subsequently be retrieved, one at a time, by setting the index of the set bit as the range identifier.
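From the VMM side, consuming the new ioctl might look like the hedged sketch below. It assumes an arm64 <linux/kvm.h> carrying the definitions added by this patch; the helper name is invented and error handling is minimal.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch only: fetch the writable masks for the feature ID range.
 * 'masks' must point at KVM_ARM_FEATURE_ID_RANGE_SIZE (192) entries. */
static int get_feature_id_masks(int vm_fd, __u64 *masks)
{
	struct reg_mask_range range;
	int supported;

	/* The capability returns a bitmap of supported range identifiers */
	supported = ioctl(vm_fd, KVM_CHECK_EXTENSION,
			  KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES);
	if (!(supported & (1 << KVM_ARM_FEATURE_ID_RANGE)))
		return -1;

	/* reserved[] must be zero or KVM may return -EINVAL */
	memset(&range, 0, sizeof(range));
	range.addr = (__u64)(unsigned long)masks;
	range.range = KVM_ARM_FEATURE_ID_RANGE;

	return ioctl(vm_fd, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
}

A VMM would typically call this once at VM creation and consult the mask array before attempting to write any ID register.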
Suggested-by: Marc Zyngier Suggested-by: Cornelia Huck Signed-off-by: Jing Zhang Reviewed-by: Cornelia Huck Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 2 ++ arch/arm64/include/uapi/asm/kvm.h | 32 +++++++++++++++++++ arch/arm64/kvm/arm.c | 10 ++++++ arch/arm64/kvm/sys_regs.c | 66 +++++++++++++++++++++++++++++++++++++++ include/uapi/linux/kvm.h | 2 ++ 5 files changed, 112 insertions(+) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index af06ccb7ee34..5e3f778f81db 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1078,6 +1078,8 @@ int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm, struct kvm_arm_copy_mte_tags *copy_tags); int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm, struct kvm_arm_counter_offset *offset); +int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, + struct reg_mask_range *range); /* Guest/host FPSIMD coordination helpers */ int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu); diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index f7ddd73a8c0f..89d2fc872d9f 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -505,6 +505,38 @@ struct kvm_smccc_filter { #define KVM_HYPERCALL_EXIT_SMC (1U << 0) #define KVM_HYPERCALL_EXIT_16BIT (1U << 1) +/* + * Get feature ID registers userspace writable mask. + * + * From DDI0487J.a, D19.2.66 ("ID_AA64MMFR2_EL1, AArch64 Memory Model + * Feature Register 2"): + * + * "The Feature ID space is defined as the System register space in + * AArch64 with op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7}, + * op2=={0-7}." + * + * This covers all currently known R/O registers that indicate + * anything useful feature wise, including the ID registers. + * + * If we ever need to introduce a new range, it will be described as + * such in the range field. 
+ */ +#define KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2) \ + ({ \ + __u64 __op1 = (op1) & 3; \ + __op1 -= (__op1 == 3); \ + (__op1 << 6 | ((crm) & 7) << 3 | (op2)); \ + }) + +#define KVM_ARM_FEATURE_ID_RANGE 0 +#define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8) + +struct reg_mask_range { + __u64 addr; /* Pointer to mask array */ + __u32 range; /* Requested range */ + __u32 reserved[13]; +}; + #endif #endif /* __ARM_KVM_H__ */ diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 4866b3f7b4ea..f48430ed5b8b 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -317,6 +317,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES: r = kvm_supported_block_sizes(); break; + case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES: + r = BIT(0); + break; default: r = 0; } @@ -1629,6 +1632,13 @@ int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) return kvm_vm_set_attr(kvm, &attr); } + case KVM_ARM_GET_REG_WRITABLE_MASKS: { + struct reg_mask_range range; + + if (copy_from_user(&range, argp, sizeof(range))) + return -EFAULT; + return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range); + } default: return -EINVAL; } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e92ec810d449..5b0e94367f74 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1373,6 +1373,13 @@ static inline bool is_id_reg(u32 id) sys_reg_CRm(id) < 8); } +static inline bool is_aa32_id_reg(u32 id) +{ + return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 && + sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 && + sys_reg_CRm(id) <= 3); +} + static unsigned int id_visibility(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { @@ -3572,6 +3579,65 @@ int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) return write_demux_regids(uindices); } +#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r) \ + KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r), \ + sys_reg_Op1(r), \ + sys_reg_CRn(r), \ + sys_reg_CRm(r), \ + sys_reg_Op2(r)) + +static bool is_feature_id_reg(u32 encoding) +{ + return (sys_reg_Op0(encoding) == 3 && + (sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) && + sys_reg_CRn(encoding) == 0 && + sys_reg_CRm(encoding) <= 7); +} + +int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range) +{ + const void *zero_page = page_to_virt(ZERO_PAGE(0)); + u64 __user *masks = (u64 __user *)range->addr; + + /* Only the feature ID range is supported; reserved[13] must be zero. */ + if (range->range || + memcmp(range->reserved, zero_page, sizeof(range->reserved))) + return -EINVAL; + + /* Wipe the whole thing first */ + if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64))) + return -EFAULT; + + for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) { + const struct sys_reg_desc *reg = &sys_reg_descs[i]; + u32 encoding = reg_to_encoding(reg); + u64 val; + + if (!is_feature_id_reg(encoding) || !reg->set_user) + continue; + + /* + * For ID registers, we return the writable mask. Other feature + * registers return a full 64bit mask. That's not necessarily + * compliant with a given revision of the architecture, but the + * RES0/RES1 definitions allow us to do that.
+ */ + if (is_id_reg(encoding)) { + if (!reg->val || + (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) + continue; + val = reg->val; + } else { + val = ~0UL; + } + + if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding)))) + return -EFAULT; + } + + return 0; +} + int __init kvm_sys_reg_table_init(void) { struct sys_reg_params params; diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index 13065dd96132..f49c84487200 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1192,6 +1192,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_COUNTER_OFFSET 227 #define KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE 228 #define KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES 229 +#define KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES 230 #ifdef KVM_CAP_IRQ_ROUTING @@ -1562,6 +1563,7 @@ struct kvm_s390_ucas_mapping { #define KVM_ARM_MTE_COPY_TAGS _IOR(KVMIO, 0xb4, struct kvm_arm_copy_mte_tags) /* Available with KVM_CAP_COUNTER_OFFSET */ #define KVM_ARM_SET_COUNTER_OFFSET _IOW(KVMIO, 0xb5, struct kvm_arm_counter_offset) +#define KVM_ARM_GET_REG_WRITABLE_MASKS _IOR(KVMIO, 0xb6, struct reg_mask_range) /* ioctl for vm fd */ #define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device) -- cgit v1.2.3 From 6656cda0f3b269cb87cfb5773d3369e6d96e83db Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 3 Oct 2023 23:03:58 +0000 Subject: KVM: arm64: Document KVM_ARM_GET_REG_WRITABLE_MASKS Add some basic documentation on how to get feature ID register writable masks from userspace. Signed-off-by: Jing Zhang Reviewed-by: Cornelia Huck Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- Documentation/virt/kvm/api.rst | 48 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index 21a7578142a1..c4cb703e37d1 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6070,6 +6070,54 @@ writes to the CNTVCT_EL0 and CNTPCT_EL0 registers using the SET_ONE_REG interface. No error will be returned, but the resulting offset will not be applied. +4.139 KVM_ARM_GET_REG_WRITABLE_MASKS +------------------------------------------- + +:Capability: KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES +:Architectures: arm64 +:Type: vm ioctl +:Parameters: struct reg_mask_range (in/out) +:Returns: 0 on success, < 0 on error + + +:: + + #define KVM_ARM_FEATURE_ID_RANGE 0 + #define KVM_ARM_FEATURE_ID_RANGE_SIZE (3 * 8 * 8) + + struct reg_mask_range { + __u64 addr; /* Pointer to mask array */ + __u32 range; /* Requested range */ + __u32 reserved[13]; + }; + +This ioctl copies the writable masks for a selected range of registers to +userspace. + +The ``addr`` field is a pointer to the destination array where KVM copies +the writable masks. + +The ``range`` field indicates the requested range of registers. +``KVM_CHECK_EXTENSION`` for the ``KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES`` +capability returns the supported ranges, expressed as a set of flags. Each +flag's bit index represents a possible value for the ``range`` field. +All other values are reserved for future use and KVM may return an error. + +The ``reserved[13]`` array is reserved for future use and should be 0, or +KVM may return an error. + +KVM_ARM_FEATURE_ID_RANGE (0) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The Feature ID range is defined as the AArch64 System register space with +op0==3, op1=={0, 1, 3}, CRn==0, CRm=={0-7}, op2=={0-7}. 
+ +The array of masks pointed to by ``addr`` is indexed by the macro +``KVM_ARM_FEATURE_ID_RANGE_IDX(op0, op1, crn, crm, op2)``, allowing userspace +to know what fields can be changed for the system register described by +``op0, op1, crn, crm, op2``. KVM rejects ID register values that describe a +superset of the features supported by the system. + 5. The kvm_run structure ======================== -- cgit v1.2.3 From 8b6958d6ace19d56b44ca0f961f4fa480d4b48fc Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 3 Oct 2023 23:03:59 +0000 Subject: KVM: arm64: Use guest ID register values for the sake of emulation Since KVM now supports per-VM ID registers, use per-VM ID register values for the sake of emulation for DBGDIDR and LORegion. Signed-off-by: Jing Zhang Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-4-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 5b0e94367f74..b73f9ff0899b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -379,7 +379,7 @@ static bool trap_loregion(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); + u64 val = IDREG(vcpu->kvm, SYS_ID_AA64MMFR1_EL1); u32 sr = reg_to_encoding(r); if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) { @@ -2445,8 +2445,8 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, if (p->is_write) { return ignore_write(vcpu, p); } else { - u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); - u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); + u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); + u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1); u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT); p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) | (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) | (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20) | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } } -- cgit v1.2.3 From 5a23e5c7cb0d5b2b38c33417579cd628be7d14c5 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 4 Oct 2023 17:03:17 +0000 Subject: KVM: arm64: Advertise selected DebugVer in DBGDIDR.Version Much like we do for other fields, extract the Debug architecture version from the ID register to populate the corresponding field in DBGDIDR. Rewrite the existing sysreg field extractors to use SYS_FIELD_GET() for consistency.
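For readers unfamiliar with the FIELD_GET()-style helpers, the standalone sketch below shows the mask/shift arithmetic behind SYS_FIELD_GET() and the DBGDIDR repacking, using deliberately simplified field masks; the real masks come from the generated sysreg definitions, not from here.

#include <stdint.h>
#include <stdio.h>

/* Generic FIELD_GET-style helpers (illustrative, not the kernel macros) */
#define FIELD_SHIFT(mask)	(__builtin_ctzll(mask))
#define FIELD_GET64(mask, reg)	(((reg) & (mask)) >> FIELD_SHIFT(mask))

/* Hypothetical 4-bit fields standing in for ID_AA64DFR0_EL1 fields */
#define DFR0_DEBUGVER_MASK	(0xfULL << 0)
#define DFR0_BRPS_MASK		(0xfULL << 12)

int main(void)
{
	uint64_t dfr = (0x8ULL << 0) | (0x5ULL << 12); /* DebugVer=8, BRPs=5 */

	/* Extract fields and repack them at new positions, as the trap
	 * handler does when synthesizing DBGDIDR from the 64-bit ID reg */
	uint32_t dbgdidr = (uint32_t)((FIELD_GET64(DFR0_BRPS_MASK, dfr) << 24) |
				      (FIELD_GET64(DFR0_DEBUGVER_MASK, dfr) << 16));

	printf("dbgdidr = 0x%x\n", dbgdidr);
	return 0;
}

The advantage over open-coded shift-and-mask is that the field is named once, in its mask, and the extraction cannot drift out of sync with the definition.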
Suggested-by: Marc Zyngier Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index b73f9ff0899b..34f578d110a9 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2447,12 +2447,13 @@ static bool trap_dbgdidr(struct kvm_vcpu *vcpu, } else { u64 dfr = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1); u64 pfr = IDREG(vcpu->kvm, SYS_ID_AA64PFR0_EL1); - u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT); + u32 el3 = !!SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr); - p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) | - (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) | - (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20) - | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12)); + p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) | + (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) | + (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) | + (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) | + (1 << 15) | (el3 << 14) | (el3 << 12)); return true; } } -- cgit v1.2.3 From a9bc4a1c1e0c206838bc2d0c8621ca6df9704a2f Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 3 Oct 2023 23:04:00 +0000 Subject: KVM: arm64: Reject attempts to set invalid debug arch version The debug architecture is mandatory in ARMv8, so KVM should not allow userspace to configure a vCPU with less than that. Of course, this isn't handled elegantly by the generic ID register plumbing, as the respective ID register fields have a nonzero starting value. Add an explicit check for debug versions less than v8 of the architecture. Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-5-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 32 +++++++++++++++++++++++++++++--- 1 file changed, 29 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 34f578d110a9..1178e2690b77 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1216,8 +1216,14 @@ static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp, /* Some features have different safe value type in KVM than host features */ switch (id) { case SYS_ID_AA64DFR0_EL1: - if (kvm_ftr.shift == ID_AA64DFR0_EL1_PMUVer_SHIFT) + switch (kvm_ftr.shift) { + case ID_AA64DFR0_EL1_PMUVer_SHIFT: kvm_ftr.type = FTR_LOWER_SAFE; + break; + case ID_AA64DFR0_EL1_DebugVer_SHIFT: + kvm_ftr.type = FTR_LOWER_SAFE; + break; + } break; case SYS_ID_DFR0_EL1: if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT) @@ -1476,14 +1482,22 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu, return val; } +#define ID_REG_LIMIT_FIELD_ENUM(val, reg, field, limit) \ +({ \ + u64 __f_val = FIELD_GET(reg##_##field##_MASK, val); \ + (val) &= ~reg##_##field##_MASK; \ + (val) |= FIELD_PREP(reg##_##field##_MASK, \ + min(__f_val, (u64)reg##_##field##_##limit)); \ + (val); \ +}) + static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd) { u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); /* Limit debug to ARMv8.0 */ - val &= ~ID_AA64DFR0_EL1_DebugVer_MASK; - val |= SYS_FIELD_PREP_ENUM(ID_AA64DFR0_EL1, DebugVer, IMP); + val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, IMP); /* * Only initialize the PMU version if the vCPU was configured with one. 
@@ -1503,6 +1517,7 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd, u64 val) { + u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val); u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val); /* @@ -1522,6 +1537,13 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) val &= ~ID_AA64DFR0_EL1_PMUVer_MASK; + /* + * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a + * nonzero minimum safe value. + */ + if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP) + return -EINVAL; + return set_id_reg(vcpu, rd, val); } @@ -1543,6 +1565,7 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu, u64 val) { u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val); + u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val); if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) { val &= ~ID_DFR0_EL1_PerfMon_MASK; @@ -1558,6 +1581,9 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu, if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3) return -EINVAL; + if (copdbg < ID_DFR0_EL1_CopDbg_Armv8) + return -EINVAL; + return set_id_reg(vcpu, rd, val); } -- cgit v1.2.3 From 9f9917bc71b08335819ea667d1e392424fb76450 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 3 Oct 2023 23:04:01 +0000 Subject: KVM: arm64: Bump up the default KVM sanitised debug version to v8p8 Since ID_AA64DFR0_EL1 and ID_DFR0_EL1 are now writable from userspace, it is safe to bump up the default KVM sanitised debug version to v8p8. Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-6-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 1178e2690b77..c28d864a6807 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1496,8 +1496,7 @@ static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu, { u64 val = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1); - /* Limit debug to ARMv8.0 */ - val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, IMP); + val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8); /* * Only initialize the PMU version if the vCPU was configured with one. @@ -1557,6 +1556,8 @@ static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu, if (kvm_vcpu_has_pmu(vcpu)) val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon); + val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8); + return val; } @@ -2013,7 +2014,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { .set_user = set_id_dfr0_el1, .visibility = aa32_id_visibility, .reset = read_sanitised_id_dfr0_el1, - .val = ID_DFR0_EL1_PerfMon_MASK, }, + .val = ID_DFR0_EL1_PerfMon_MASK | + ID_DFR0_EL1_CopDbg_MASK, }, ID_HIDDEN(ID_AFR0_EL1), AA32_ID_SANITISED(ID_MMFR0_EL1), AA32_ID_SANITISED(ID_MMFR1_EL1), @@ -2062,7 +2064,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, .reset = read_sanitised_id_aa64dfr0_el1, - .val = ID_AA64DFR0_EL1_PMUVer_MASK, }, + .val = ID_AA64DFR0_EL1_PMUVer_MASK | + ID_AA64DFR0_EL1_DebugVer_MASK, }, ID_SANITISED(ID_AA64DFR1_EL1), ID_UNALLOCATED(5,2), ID_UNALLOCATED(5,3), -- cgit v1.2.3 From 56d77aa8bdf527f3b767fa27a3f135769151c92d Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 3 Oct 2023 23:04:02 +0000 Subject: KVM: arm64: Allow userspace to change ID_AA64ISAR{0-2}_EL1 Almost all of the features described by the ISA registers have no KVM involvement. 
Allow userspace to change the value of these registers with a couple exceptions: - MOPS is not writable as KVM does not currently virtualize FEAT_MOPS. - The PAuth fields are not writable as KVM requires both address and generic authentication be enabled. Co-developed-by: Jing Zhang Signed-off-by: Jing Zhang Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-7-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 38 ++++++++++++++++++++++++++------------ 1 file changed, 26 insertions(+), 12 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index c28d864a6807..5d1a94af42f2 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1851,11 +1851,14 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu, * from userspace. */ -/* sys_reg_desc initialiser for known cpufeature ID registers */ -#define ID_SANITISED(name) { \ +#define ID_DESC(name) \ SYS_DESC(SYS_##name), \ .access = access_id_reg, \ - .get_user = get_id_reg, \ + .get_user = get_id_reg \ + +/* sys_reg_desc initialiser for known cpufeature ID registers */ +#define ID_SANITISED(name) { \ + ID_DESC(name), \ .set_user = set_id_reg, \ .visibility = id_visibility, \ .reset = kvm_read_sanitised_id_reg, \ @@ -1864,15 +1867,22 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu, /* sys_reg_desc initialiser for known cpufeature ID registers */ #define AA32_ID_SANITISED(name) { \ - SYS_DESC(SYS_##name), \ - .access = access_id_reg, \ - .get_user = get_id_reg, \ + ID_DESC(name), \ .set_user = set_id_reg, \ .visibility = aa32_id_visibility, \ .reset = kvm_read_sanitised_id_reg, \ .val = 0, \ } +/* sys_reg_desc initialiser for writable ID registers */ +#define ID_WRITABLE(name, mask) { \ + ID_DESC(name), \ + .set_user = set_id_reg, \ + .visibility = id_visibility, \ + .reset = kvm_read_sanitised_id_reg, \ + .val = mask, \ +} + /* * sys_reg_desc initialiser for architecturally unallocated cpufeature ID * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2 @@ -1894,9 +1904,7 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu, * RAZ for the guest. */ #define ID_HIDDEN(name) { \ - SYS_DESC(SYS_##name), \ - .access = access_id_reg, \ - .get_user = get_id_reg, \ + ID_DESC(name), \ .set_user = set_id_reg, \ .visibility = raz_visibility, \ .reset = kvm_read_sanitised_id_reg, \ @@ -2075,9 +2083,15 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_UNALLOCATED(5,7), /* CRm=6 */ - ID_SANITISED(ID_AA64ISAR0_EL1), - ID_SANITISED(ID_AA64ISAR1_EL1), - ID_SANITISED(ID_AA64ISAR2_EL1), + ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0), + ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI | + ID_AA64ISAR1_EL1_GPA | + ID_AA64ISAR1_EL1_API | + ID_AA64ISAR1_EL1_APA)), + ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 | + ID_AA64ISAR2_EL1_MOPS | + ID_AA64ISAR2_EL1_APA3 | + ID_AA64ISAR2_EL1_GPA3)), ID_UNALLOCATED(6,3), ID_UNALLOCATED(6,4), ID_UNALLOCATED(6,5), -- cgit v1.2.3 From d5a32b60dc184cc7309f83648a368b94d91c797f Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 3 Oct 2023 23:04:03 +0000 Subject: KVM: arm64: Allow userspace to change ID_AA64MMFR{0-2}_EL1 Allow userspace to modify the guest-visible values of these ID registers. Prevent changes to any of the virtualization features until KVM picks up support for nested and we have a handle on managing NV features. 
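All of the ID_WRITABLE() masks in this series follow one convention: start from an all-writable register, then clear the RES0 bits and any field KVM refuses to let userspace change. The hedged sketch below, with invented mask values, shows both the composition and the check a consumer of such a mask implies.

#include <stdint.h>
#include <stdio.h>

/* Invented example masks; the real ones come from generated sysreg defs */
#define EXAMPLE_RES0	0xff00000000000000ULL /* architecturally reserved */
#define EXAMPLE_NV_FEAT	0x00000000000000f0ULL /* virtualization feature  */

/* A user write is acceptable only if it changes writable bits alone */
static int write_allowed(uint64_t cur, uint64_t new, uint64_t writable)
{
	return ((cur ^ new) & ~writable) == 0;
}

int main(void)
{
	/* Same shape as ID_WRITABLE(reg, ~(RES0 | <protected fields>)) */
	uint64_t writable = ~(EXAMPLE_RES0 | EXAMPLE_NV_FEAT);

	printf("writable mask = 0x%016llx\n", (unsigned long long)writable);
	printf("flip protected bit allowed: %d\n",
	       write_allowed(0, EXAMPLE_NV_FEAT, writable));
	return 0;
}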
Signed-off-by: Jing Zhang [oliver: prevent changes to EL2 features for now] Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-8-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 5d1a94af42f2..d850b05eb504 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2099,9 +2099,23 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_UNALLOCATED(6,7), /* CRm=7 */ - ID_SANITISED(ID_AA64MMFR0_EL1), - ID_SANITISED(ID_AA64MMFR1_EL1), - ID_SANITISED(ID_AA64MMFR2_EL1), + ID_WRITABLE(ID_AA64MMFR0_EL1, ~(ID_AA64MMFR0_EL1_RES0 | + ID_AA64MMFR0_EL1_TGRAN4_2 | + ID_AA64MMFR0_EL1_TGRAN64_2 | + ID_AA64MMFR0_EL1_TGRAN16_2)), + ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 | + ID_AA64MMFR1_EL1_HCX | + ID_AA64MMFR1_EL1_XNX | + ID_AA64MMFR1_EL1_TWED | + ID_AA64MMFR1_EL1_XNX | + ID_AA64MMFR1_EL1_VH | + ID_AA64MMFR1_EL1_VMIDBits)), + ID_WRITABLE(ID_AA64MMFR2_EL1, ~(ID_AA64MMFR2_EL1_RES0 | + ID_AA64MMFR2_EL1_EVT | + ID_AA64MMFR2_EL1_FWB | + ID_AA64MMFR2_EL1_IDS | + ID_AA64MMFR2_EL1_NV | + ID_AA64MMFR2_EL1_CCIDX)), ID_SANITISED(ID_AA64MMFR3_EL1), ID_UNALLOCATED(7,4), ID_UNALLOCATED(7,5), -- cgit v1.2.3 From 8cfd5be88ebe3d71be1a2c52fc72a355bb924b49 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Tue, 3 Oct 2023 23:04:04 +0000 Subject: KVM: arm64: Allow userspace to change ID_AA64PFR0_EL1 Allow userspace to change the guest-visible value of the register with some severe limitations: - No changes to features not virtualized by KVM (AMU, MPAM, RAS) - Short of full GICv2 emulation in the kernel, hiding GICv3 from the guest makes absolutely no sense. - FP is effectively assumed for KVM VMs. Signed-off-by: Jing Zhang [oliver: restrict features that are illogical to change] Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-9-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d850b05eb504..dbf915f88b17 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2057,7 +2057,13 @@ static const struct sys_reg_desc sys_reg_descs[] = { .get_user = get_id_reg, .set_user = set_id_reg, .reset = read_sanitised_id_aa64pfr0_el1, - .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, }, + .val = ~(ID_AA64PFR0_EL1_AMU | + ID_AA64PFR0_EL1_MPAM | + ID_AA64PFR0_EL1_SVE | + ID_AA64PFR0_EL1_RAS | + ID_AA64PFR0_EL1_GIC | + ID_AA64PFR0_EL1_AdvSIMD | + ID_AA64PFR0_EL1_FP), }, ID_SANITISED(ID_AA64PFR1_EL1), ID_UNALLOCATED(4,2), ID_UNALLOCATED(4,3), -- cgit v1.2.3 From f89fbb350dd76d6b5f080954309b9dec5ad220ac Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 3 Oct 2023 23:04:05 +0000 Subject: KVM: arm64: Allow userspace to change ID_AA64ZFR0_EL1 All known fields in ID_AA64ZFR0_EL1 describe the unprivileged instructions supported by the PE's SVE implementation. Allow userspace to pick and choose the advertised feature set, though nothing stops the guest from using undisclosed instructions.
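Exercising these writable registers from userspace is a read-modify-write through the ONE_REG API. The outline below is a sketch under stated assumptions: reg_id stands for the 64-bit KVM encoding of an ID register (not spelled out here), and clear_field() is a hypothetical helper, not part of any real API.

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: zero the field selected by 'mask' in 'val' */
static __u64 clear_field(__u64 val, __u64 mask)
{
	return val & ~mask;
}

/* Sketch: hide one ID register feature from the guest before first KVM_RUN */
static int hide_feature(int vcpu_fd, __u64 reg_id, __u64 field_mask)
{
	__u64 val;
	struct kvm_one_reg reg = {
		.id = reg_id,
		.addr = (__u64)(unsigned long)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
		return -1;

	val = clear_field(val, field_mask);

	/* KVM rejects values that exceed the system's capabilities */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

Because the ID fields are feature enumerations, clearing a field to zero advertises "not implemented", which is the limit-or-mask direction the architecture permits.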
Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-10-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index dbf915f88b17..57c8190d5438 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -2067,7 +2067,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_SANITISED(ID_AA64PFR1_EL1), ID_UNALLOCATED(4,2), ID_UNALLOCATED(4,3), - ID_SANITISED(ID_AA64ZFR0_EL1), + ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0), ID_HIDDEN(ID_AA64SMFR0_EL1), ID_UNALLOCATED(4,6), ID_UNALLOCATED(4,7), -- cgit v1.2.3 From dafa493dd01d5992f1cb70b08d1741c3ab99e04a Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 3 Oct 2023 23:04:06 +0000 Subject: KVM: arm64: Document vCPU feature selection UAPIs KVM/arm64 has a couple schemes for handling vCPU feature selection now, which is a lot to put on userspace. Add some documentation about how these interact and provide some recommendations for how to use the writable ID register scheme. Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231003230408.3405722-11-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- Documentation/virt/kvm/api.rst | 4 +++ Documentation/virt/kvm/arm/index.rst | 1 + Documentation/virt/kvm/arm/vcpu-features.rst | 48 ++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+) create mode 100644 Documentation/virt/kvm/arm/vcpu-features.rst diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index c4cb703e37d1..d7ce71e27c33 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -3370,6 +3370,8 @@ return indicates the attribute is implemented. It does not necessarily indicate that the attribute can be read or written in the device's current state. "addr" is ignored. +.. _KVM_ARM_VCPU_INIT: + 4.82 KVM_ARM_VCPU_INIT ---------------------- @@ -6070,6 +6072,8 @@ writes to the CNTVCT_EL0 and CNTPCT_EL0 registers using the SET_ONE_REG interface. No error will be returned, but the resulting offset will not be applied. +.. _KVM_ARM_GET_REG_WRITABLE_MASKS: + 4.139 KVM_ARM_GET_REG_WRITABLE_MASKS ------------------------------------------- diff --git a/Documentation/virt/kvm/arm/index.rst b/Documentation/virt/kvm/arm/index.rst index e84848432158..7f231c724e16 100644 --- a/Documentation/virt/kvm/arm/index.rst +++ b/Documentation/virt/kvm/arm/index.rst @@ -11,3 +11,4 @@ ARM hypercalls pvtime ptp_kvm + vcpu-features diff --git a/Documentation/virt/kvm/arm/vcpu-features.rst b/Documentation/virt/kvm/arm/vcpu-features.rst new file mode 100644 index 000000000000..f7cc6d8d8b74 --- /dev/null +++ b/Documentation/virt/kvm/arm/vcpu-features.rst @@ -0,0 +1,48 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=============================== +vCPU feature selection on arm64 +=============================== + +KVM/arm64 provides two mechanisms that allow userspace to configure +the CPU features presented to the guest. + +KVM_ARM_VCPU_INIT +================= + +The ``KVM_ARM_VCPU_INIT`` ioctl accepts a bitmap of feature flags +(``struct kvm_vcpu_init::features``). Features enabled by this interface are +*opt-in* and may change/extend UAPI. See :ref:`KVM_ARM_VCPU_INIT` for complete +documentation of the features controlled by the ioctl. + +Otherwise, all CPU features supported by KVM are described by the architected +ID registers. 
+ +The ID Registers +================ + +The Arm architecture specifies a range of *ID Registers* that describe the set +of architectural features supported by the CPU implementation. KVM initializes +the guest's ID registers to the maximum set of CPU features supported by the +system. The ID register values may be VM-scoped in KVM, meaning that the +values could be shared for all vCPUs in a VM. + +KVM allows userspace to *opt-out* of certain CPU features described by the ID +registers by writing values to them via the ``KVM_SET_ONE_REG`` ioctl. The ID +registers are mutable until the VM has started, i.e. userspace has called +``KVM_RUN`` on at least one vCPU in the VM. Userspace can discover what fields +are mutable in the ID registers using the ``KVM_ARM_GET_REG_WRITABLE_MASKS`` +ioctl. See the :ref:`ioctl documentation <KVM_ARM_GET_REG_WRITABLE_MASKS>` for +more details. + +Userspace is allowed to *limit* or *mask* CPU features according to the rules +outlined by the architecture in DDI0487J.a D19.1.3 'Principles of the ID +scheme for fields in ID register'. KVM does not allow ID register values that +exceed the capabilities of the system. + +.. warning:: + It is **strongly recommended** that userspace modify the ID register values + before accessing the rest of the vCPU's CPU register state. KVM may use the + ID register values to control feature emulation. Interleaving ID register + modification with other system register accesses may lead to unpredictable + behavior. -- cgit v1.2.3 From bb17fb31f00ebd8df478a9533c6b77d6eebe6464 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 4 Oct 2023 23:49:45 +0000 Subject: KVM: arm64: Add a predicate for testing if SMCCC filter is configured Eventually we can drop the VM flag; move around the existing implementation for now. Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231004234947.207507-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/hypercalls.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 7fb4df0456de..35e023322cdb 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -158,6 +158,11 @@ static void init_smccc_filter(struct kvm *kvm) } +static bool kvm_smccc_filter_configured(struct kvm *kvm) +{ + return test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags); +} + static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr) { const void *zero_page = page_to_virt(ZERO_PAGE(0)); @@ -201,7 +206,7 @@ static u8 kvm_smccc_filter_get_action(struct kvm *kvm, u32 func_id) unsigned long idx = func_id; void *val; - if (!test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags)) + if (!kvm_smccc_filter_configured(kvm)) return KVM_SMCCC_FILTER_HANDLE; /* -- cgit v1.2.3 From d34b76489ea0c6168d230d4f9ad065422ba5f6d6 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 4 Oct 2023 23:49:46 +0000 Subject: KVM: arm64: Only insert reserved ranges when SMCCC filter is used The reserved ranges are only useful for preventing userspace from adding a rule that intersects with functions we must handle in KVM. If userspace never writes to the SMCCC filter then this is all just wasted work/memory. Insert reserved ranges on the first call to KVM_ARM_VM_SMCCC_FILTER.
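The shape of the change is the familiar initialize-on-first-use pattern. Below is a detached C sketch of that shape (all names assumed; no maple tree involved): the reserved entries are paid for lazily on the first configuration call, and a failure leaves the filter unconfigured.

#include <stdbool.h>
#include <stdio.h>

/* Assumed stand-in for the filter's backing store */
static int filter_nr_entries;

static bool filter_configured(void)
{
	return filter_nr_entries != 0;
}

static int insert_reserved_ranges(void)
{
	/* Pretend the two architecture ranges were inserted */
	filter_nr_entries += 2;
	return 0;
}

/* Sketch of the set_filter path: reserved ranges go in lazily */
static int set_filter(int start, int end)
{
	if (!filter_configured()) {
		if (insert_reserved_ranges())
			return -1; /* nothing inserted; filter stays empty */
	}

	filter_nr_entries++;
	printf("installed filter [%d, %d]\n", start, end);
	return 0;
}

int main(void)
{
	return set_filter(0x1000, 0x10ff);
}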
Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231004234947.207507-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/hypercalls.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 35e023322cdb..20a878c64ba7 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -133,12 +133,10 @@ static bool kvm_smccc_test_fw_bmap(struct kvm_vcpu *vcpu, u32 func_id) ARM_SMCCC_SMC_64, \ 0, ARM_SMCCC_FUNC_MASK) -static void init_smccc_filter(struct kvm *kvm) +static int kvm_smccc_filter_insert_reserved(struct kvm *kvm) { int r; - mt_init(&kvm->arch.smccc_filter); - /* * Prevent userspace from handling any SMCCC calls in the architecture * range, avoiding the risk of misrepresenting Spectre mitigation status @@ -148,14 +146,20 @@ static void init_smccc_filter(struct kvm *kvm) SMC32_ARCH_RANGE_BEGIN, SMC32_ARCH_RANGE_END, xa_mk_value(KVM_SMCCC_FILTER_HANDLE), GFP_KERNEL_ACCOUNT); - WARN_ON_ONCE(r); + if (r) + goto out_destroy; r = mtree_insert_range(&kvm->arch.smccc_filter, SMC64_ARCH_RANGE_BEGIN, SMC64_ARCH_RANGE_END, xa_mk_value(KVM_SMCCC_FILTER_HANDLE), GFP_KERNEL_ACCOUNT); - WARN_ON_ONCE(r); + if (r) + goto out_destroy; + return 0; +out_destroy: + mtree_destroy(&kvm->arch.smccc_filter); + return r; } static bool kvm_smccc_filter_configured(struct kvm *kvm) @@ -189,6 +193,12 @@ static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user goto out_unlock; } + if (!kvm_smccc_filter_configured(kvm)) { + r = kvm_smccc_filter_insert_reserved(kvm); + if (WARN_ON_ONCE(r)) + goto out_unlock; + } + r = mtree_insert_range(&kvm->arch.smccc_filter, start, end, xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT); if (r) @@ -392,7 +402,7 @@ void kvm_arm_init_hypercalls(struct kvm *kvm) smccc_feat->std_hyp_bmap = KVM_ARM_SMCCC_STD_HYP_FEATURES; smccc_feat->vendor_hyp_bmap = KVM_ARM_SMCCC_VENDOR_HYP_FEATURES; - init_smccc_filter(kvm); + mt_init(&kvm->arch.smccc_filter); } void kvm_arm_teardown_hypercalls(struct kvm *kvm) -- cgit v1.2.3 From 4202bcac5e65de2d7193b7e27984b810ba33aada Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 4 Oct 2023 23:49:47 +0000 Subject: KVM: arm64: Use mtree_empty() to determine if SMCCC filter configured The smccc_filter maple tree is only populated if userspace attempted to configure it. Use the state of the maple tree to determine if the filter has been configured, eliminating the VM flag. 
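In miniature, the cleanup that follows replaces redundant state with a derived predicate: when a container's emptiness already answers "has userspace touched this?", a separate flag can only drift out of sync with it. A hedged sketch of the idea, with an invented struct in place of the maple tree:

#include <stdbool.h>
#include <stdio.h>

struct filter {
	int nr_entries;	/* stand-in for the maple tree's contents */
};

/*
 * Derive 'configured' from the data itself, analogous to
 * !mtree_empty(); no flag needs to be set at the right moment.
 */
static bool filter_configured(const struct filter *f)
{
	return f->nr_entries != 0;
}

int main(void)
{
	struct filter f = { 0 };

	printf("configured: %d\n", filter_configured(&f));
	f.nr_entries = 1;
	printf("configured: %d\n", filter_configured(&f));
	return 0;
}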
Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20231004234947.207507-4-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 4 +--- arch/arm64/kvm/hypercalls.c | 7 +------ 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index af06ccb7ee34..feb63db7a5cf 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -239,10 +239,8 @@ struct kvm_arch { #define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5 /* Timer PPIs made immutable */ #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6 - /* SMCCC filter initialized for the VM */ -#define KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED 7 /* Initial ID reg values loaded */ -#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 8 +#define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7 unsigned long flags; /* VM-wide vCPU feature set */ diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 20a878c64ba7..a61213786e5f 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -164,7 +164,7 @@ out_destroy: static bool kvm_smccc_filter_configured(struct kvm *kvm) { - return test_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags); + return !mtree_empty(&kvm->arch.smccc_filter); } static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user *uaddr) @@ -201,11 +201,6 @@ static int kvm_smccc_set_filter(struct kvm *kvm, struct kvm_smccc_filter __user r = mtree_insert_range(&kvm->arch.smccc_filter, start, end, xa_mk_value(filter.action), GFP_KERNEL_ACCOUNT); - if (r) - goto out_unlock; - - set_bit(KVM_ARCH_FLAG_SMCCC_FILTER_CONFIGURED, &kvm->arch.flags); - out_unlock: mutex_unlock(&kvm->arch.config_lock); return r; } -- cgit v1.2.3 From 2de451a329cf662beeba71f63c7f83ee24ca6642 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 22 Sep 2023 12:25:07 +0100 Subject: KVM: arm64: Add handler for MOPS exceptions An Armv8.8 FEAT_MOPS main or epilogue instruction will take an exception if executed on a CPU with a different MOPS implementation option (A or B) than the CPU where the preceding prologue instruction ran. In this case the OS exception handler is expected to reset the registers and restart execution from the prologue instruction. A KVM guest may use the instructions at EL1 at times when the guest is not able to handle the exception, expecting that the instructions will only run on one CPU (e.g. when running UEFI boot services in the guest). As KVM may reschedule the guest between different types of CPUs at any time (on an asymmetric system), it needs to also handle the resulting exception itself in case the guest is not able to. A similar situation will also occur in the future when live migrating a guest from one type of CPU to another. Add handling for the MOPS exception to KVM. The handling can be shared with the EL0 exception handler, as the logic and register layouts are the same. The exception can be handled right after exiting a guest, which avoids the cost of returning to the host exit handler. Similarly to the EL0 exception handler, in case the main or epilogue instruction is being single stepped, it makes sense to finish the step before executing the prologue instruction, so advance the single step state machine.
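To follow the ESR plumbing in the diff below, it may help to see the decision structure in isolation. The bit positions in this sketch are invented placeholders, not the architected ESR_ELx_MOPS_ISS_* layout; only the option-format test and the PC rewind mirror the handler.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit layout; NOT the real ESR_ELx encoding */
#define ISS_MEM_INST	 (1UL << 24)	/* SET* rather than CPY* */
#define ISS_FROM_EPILOGUE (1UL << 18)
#define ISS_WRONG_OPTION (1UL << 17)
#define ISS_OPTION_A	 (1UL << 16)

/* Rewind the PC so the guest restarts from the prologue instruction */
static uint64_t restart_pc(uint64_t pc, uint64_t esr)
{
	return pc - ((esr & ISS_FROM_EPILOGUE) ? 8 : 4);
}

int main(void)
{
	uint64_t esr = ISS_MEM_INST | ISS_WRONG_OPTION | ISS_OPTION_A;

	/* option_a ^ wrong_option selects the current register format,
	 * mirroring the handler's test for how to reset the registers */
	bool format_is_option_a = (bool)(esr & ISS_OPTION_A) ^
				  (bool)(esr & ISS_WRONG_OPTION);

	printf("format A: %d, new pc: 0x%llx\n", format_is_option_a,
	       (unsigned long long)restart_pc(0x8000, esr));
	return 0;
}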
Signed-off-by: Kristina Martsenko Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230922112508.1774352-2-kristina.martsenko@arm.com Signed-off-by: Oliver Upton --- arch/arm64/include/asm/traps.h | 54 +++++++++++++++++++++++++++++++-- arch/arm64/kernel/traps.c | 48 +---------------------------- arch/arm64/kvm/hyp/include/hyp/switch.h | 17 +++++++++++ arch/arm64/kvm/hyp/nvhe/switch.c | 2 ++ arch/arm64/kvm/hyp/vhe/switch.c | 1 + 5 files changed, 73 insertions(+), 49 deletions(-) diff --git a/arch/arm64/include/asm/traps.h b/arch/arm64/include/asm/traps.h index d66dfb3a72dd..eefe766d6161 100644 --- a/arch/arm64/include/asm/traps.h +++ b/arch/arm64/include/asm/traps.h @@ -9,10 +9,9 @@ #include <linux/list.h> #include <asm/esr.h> +#include <asm/ptrace.h> #include <asm/sections.h> -struct pt_regs; #ifdef CONFIG_ARMV8_DEPRECATED bool try_emulate_armv8_deprecated(struct pt_regs *regs, u32 insn); #else @@ -101,4 +100,55 @@ static inline unsigned long arm64_ras_serror_get_severity(unsigned long esr) bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned long esr); void __noreturn arm64_serror_panic(struct pt_regs *regs, unsigned long esr); + +static inline void arm64_mops_reset_regs(struct user_pt_regs *regs, unsigned long esr) +{ + bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION; + bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A; + int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr); + int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr); + int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr); + unsigned long dst, src, size; + + dst = regs->regs[dstreg]; + src = regs->regs[srcreg]; + size = regs->regs[sizereg]; + + /* + * Put the registers back in the original format suitable for a + * prologue instruction, using the generic return routine from the + * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH. + */ + if (esr & ESR_ELx_MOPS_ISS_MEM_INST) { + /* SET* instruction */ + if (option_a ^ wrong_option) { + /* Format is from Option A; forward set */ + regs->regs[dstreg] = dst + size; + regs->regs[sizereg] = -size; + } + } else { + /* CPY* instruction */ + if (!(option_a ^ wrong_option)) { + /* Format is from Option B */ + if (regs->pstate & PSR_N_BIT) { + /* Backward copy */ + regs->regs[dstreg] = dst - size; + regs->regs[srcreg] = src - size; + } + } else { + /* Format is from Option A */ + if (size & BIT(63)) { + /* Forward copy */ + regs->regs[dstreg] = dst + size; + regs->regs[srcreg] = src + size; + regs->regs[sizereg] = -size; + } + } + } + + if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE) + regs->pc -= 8; + else + regs->pc -= 4; +} #endif diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 8b70759cdbb9..ede65a20e7dc 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -516,53 +516,7 @@ void do_el1_fpac(struct pt_regs *regs, unsigned long esr) void do_el0_mops(struct pt_regs *regs, unsigned long esr) { - bool wrong_option = esr & ESR_ELx_MOPS_ISS_WRONG_OPTION; - bool option_a = esr & ESR_ELx_MOPS_ISS_OPTION_A; - int dstreg = ESR_ELx_MOPS_ISS_DESTREG(esr); - int srcreg = ESR_ELx_MOPS_ISS_SRCREG(esr); - int sizereg = ESR_ELx_MOPS_ISS_SIZEREG(esr); - unsigned long dst, src, size; - - dst = pt_regs_read_reg(regs, dstreg); - src = pt_regs_read_reg(regs, srcreg); - size = pt_regs_read_reg(regs, sizereg); - - /* - * Put the registers back in the original format suitable for a - * prologue instruction, using the generic return routine from the - * Arm ARM (DDI 0487I.a) rules CNTMJ and MWFQH.
- */ - if (esr & ESR_ELx_MOPS_ISS_MEM_INST) { - /* SET* instruction */ - if (option_a ^ wrong_option) { - /* Format is from Option A; forward set */ - pt_regs_write_reg(regs, dstreg, dst + size); - pt_regs_write_reg(regs, sizereg, -size); - } - } else { - /* CPY* instruction */ - if (!(option_a ^ wrong_option)) { - /* Format is from Option B */ - if (regs->pstate & PSR_N_BIT) { - /* Backward copy */ - pt_regs_write_reg(regs, dstreg, dst - size); - pt_regs_write_reg(regs, srcreg, src - size); - } - } else { - /* Format is from Option A */ - if (size & BIT(63)) { - /* Forward copy */ - pt_regs_write_reg(regs, dstreg, dst + size); - pt_regs_write_reg(regs, srcreg, src + size); - pt_regs_write_reg(regs, sizereg, -size); - } - } - } - - if (esr & ESR_ELx_MOPS_ISS_FROM_EPILOGUE) - regs->pc -= 8; - else - regs->pc -= 4; + arm64_mops_reset_regs(&regs->user_regs, esr); /* * If single stepping then finish the step before executing the diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 9cfe6bd1dbe4..f99d8af0b9af 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -30,6 +30,7 @@ #include <asm/fpsimd.h> #include <asm/debug-monitors.h> #include <asm/processor.h> +#include <asm/traps.h> struct kvm_exception_table_entry { int insn, fixup; }; @@ -265,6 +266,22 @@ static inline bool __populate_fault_info(struct kvm_vcpu *vcpu) return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault); } +static bool kvm_hyp_handle_mops(struct kvm_vcpu *vcpu, u64 *exit_code) +{ + *vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR); + arm64_mops_reset_regs(vcpu_gp_regs(vcpu), vcpu->arch.fault.esr_el2); + write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR); + + /* + * Finish potential single step before executing the prologue + * instruction. + */ + *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS; + write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR); + + return true; +} + static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu) { sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2); diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c index c353a06ee7e6..c50f8459e4fc 100644 --- a/arch/arm64/kvm/hyp/nvhe/switch.c +++ b/arch/arm64/kvm/hyp/nvhe/switch.c @@ -192,6 +192,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, + [ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops, }; static const exit_handler_fn pvm_exit_handlers[] = { @@ -203,6 +204,7 @@ static const exit_handler_fn pvm_exit_handlers[] = { [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, + [ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops, }; static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 6537f58b1a8c..796202f2e08f 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -126,6 +126,7 @@ static const exit_handler_fn hyp_exit_handlers[] = { [ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low, [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low, [ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth, + [ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops, }; static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu) -- cgit v1.2.3 From e0bb80c62cfd0c289c841d35e412e633299530e3 Mon Sep 17 00:00:00 2001 From: Kristina Martsenko Date: Fri, 22 Sep 2023 12:25:08 +0100
Subject: KVM: arm64: Expose MOPS instructions to guests Expose the Armv8.8 FEAT_MOPS feature to guests in the ID register and allow the MOPS instructions to be run in a guest. Only expose MOPS if the whole system supports it. Note, it is expected that guests do not use these instructions on MMIO, similarly to other instructions where ESR_EL2.ISV==0 such as LDP/STP. Signed-off-by: Kristina Martsenko Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20230922112508.1774352-3-kristina.martsenko@arm.com Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_arm.h | 4 +++- arch/arm64/kvm/hyp/include/nvhe/fixed_config.h | 3 ++- arch/arm64/kvm/sys_regs.c | 2 -- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h index 5882b2415596..2186927b1d21 100644 --- a/arch/arm64/include/asm/kvm_arm.h +++ b/arch/arm64/include/asm/kvm_arm.h @@ -102,7 +102,9 @@ #define HCR_HOST_NVHE_PROTECTED_FLAGS (HCR_HOST_NVHE_FLAGS | HCR_TSC) #define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H) -#define HCRX_GUEST_FLAGS (HCRX_EL2_SMPME | HCRX_EL2_TCR2En) +#define HCRX_GUEST_FLAGS \ + (HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \ + (cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0)) #define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En) /* TCR_EL2 Registers bits */ diff --git a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h index 37440e1dda93..e91922daa8ca 100644 --- a/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h +++ b/arch/arm64/kvm/hyp/include/nvhe/fixed_config.h @@ -197,7 +197,8 @@ #define PVM_ID_AA64ISAR2_ALLOW (\ ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3) | \ - ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) \ + ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) | \ + ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS) \ ) u64 pvm_read_id_reg(const struct kvm_vcpu *vcpu, u32 id); diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 57c8190d5438..9601af2aa062 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1344,7 +1344,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu, ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3)); if (!cpus_have_final_cap(ARM64_HAS_WFXT)) val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT); - val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_MOPS); break; case SYS_ID_AA64MMFR2_EL1: val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK; @@ -2095,7 +2094,6 @@ static const struct sys_reg_desc sys_reg_descs[] = { ID_AA64ISAR1_EL1_API | ID_AA64ISAR1_EL1_APA)), ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 | - ID_AA64ISAR2_EL1_MOPS | ID_AA64ISAR2_EL1_APA3 | ID_AA64ISAR2_EL1_GPA3)), ID_UNALLOCATED(6,3), -- cgit v1.2.3 From 02e85f74668e1aa0062708b58d8aef020054e56c Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 11 Oct 2023 19:57:36 +0000 Subject: tools: arm64: Add a Makefile for generating sysreg-defs.h Use a common Makefile for generating sysreg-defs.h, which will soon be needed by perf and KVM selftests. The naming scheme of the generated macros is not expected to change, so just refer to the canonical script/data in the kernel source rather than copying to tools. 
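Once generated, the header lets tools-side C share the kernel's field definitions. The consumer below is hypothetical (the include path is an assumption based on the perf change that follows), but the macro it prints is one of the generated names already used throughout this series.

/*
 * Sketch of a tools-side consumer. Assumes the include path contains
 * tools/arch/arm64/include/generated/, as the perf change below arranges.
 */
#include <stdio.h>
#include <asm/sysreg-defs.h>	/* generated from arch/arm64/tools/sysreg */

int main(void)
{
	/* Field shift macros like this one appear throughout the series */
	printf("DebugVer shift = %d\n", ID_AA64DFR0_EL1_DebugVer_SHIFT);
	return 0;
}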
Co-developed-by: Jing Zhang Signed-off-by: Jing Zhang Reviewed-by: Mark Brown Reviewed-by: Eric Auger Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- tools/arch/arm64/include/.gitignore | 1 + tools/arch/arm64/tools/Makefile | 38 +++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 tools/arch/arm64/include/.gitignore create mode 100644 tools/arch/arm64/tools/Makefile diff --git a/tools/arch/arm64/include/.gitignore b/tools/arch/arm64/include/.gitignore new file mode 100644 index 000000000000..9ab870da897d --- /dev/null +++ b/tools/arch/arm64/include/.gitignore @@ -0,0 +1 @@ +generated/ diff --git a/tools/arch/arm64/tools/Makefile b/tools/arch/arm64/tools/Makefile new file mode 100644 index 000000000000..f867e6036c62 --- /dev/null +++ b/tools/arch/arm64/tools/Makefile @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0 + +ifeq ($(srctree),) +srctree := $(patsubst %/,%,$(dir $(CURDIR))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +srctree := $(patsubst %/,%,$(dir $(srctree))) +endif + +include $(srctree)/tools/scripts/Makefile.include + +AWK ?= awk +MKDIR ?= mkdir +RM ?= rm + +ifeq ($(V),1) +Q = +else +Q = @ +endif + +arm64_tools_dir = $(srctree)/arch/arm64/tools +arm64_sysreg_tbl = $(arm64_tools_dir)/sysreg +arm64_gen_sysreg = $(arm64_tools_dir)/gen-sysreg.awk +arm64_generated_dir = $(srctree)/tools/arch/arm64/include/generated +arm64_sysreg_defs = $(arm64_generated_dir)/asm/sysreg-defs.h + +all: $(arm64_sysreg_defs) + @: + +$(arm64_sysreg_defs): $(arm64_gen_sysreg) $(arm64_sysreg_tbl) + $(Q)$(MKDIR) -p $(dir $@) + $(QUIET_GEN)$(AWK) -f $^ > $@ + +clean: + $(Q)$(RM) -rf $(arm64_generated_dir) + +.PHONY: all clean -- cgit v1.2.3 From e2bdd172e6652c2f5554d125a5048bc9f9b0dfa3 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 11 Oct 2023 19:57:37 +0000 Subject: perf build: Generate arm64's sysreg-defs.h and add to include path Start generating sysreg-defs.h in anticipation of updating sysreg.h to a version that needs the generated output. Acked-by: Arnaldo Carvalho de Melo Acked-by: Namhyung Kim Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- tools/perf/Makefile.perf | 15 +++++++++++++-- tools/perf/util/Build | 2 +- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 37af6df7b978..14dedd11a1f5 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -443,6 +443,15 @@ drm_ioctl_tbl := $(srctree)/tools/perf/trace/beauty/drm_ioctl.sh # Create output directory if not already present _dummy := $(shell [ -d '$(beauty_ioctl_outdir)' ] || mkdir -p '$(beauty_ioctl_outdir)') +arm64_gen_sysreg_dir := $(srctree)/tools/arch/arm64/tools + +arm64-sysreg-defs: FORCE + $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) + +arm64-sysreg-defs-clean: + $(call QUIET_CLEAN,arm64-sysreg-defs) + $(Q)$(MAKE) -C $(arm64_gen_sysreg_dir) clean > /dev/null + $(drm_ioctl_array): $(drm_hdr_dir)/drm.h $(drm_hdr_dir)/i915_drm.h $(drm_ioctl_tbl) $(Q)$(SHELL) '$(drm_ioctl_tbl)' $(drm_hdr_dir) > $@ @@ -716,7 +725,9 @@ endif __build-dir = $(subst $(OUTPUT),,$(dir $@)) build-dir = $(or $(__build-dir),.) 
-prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders $(drm_ioctl_array) \ +prepare: $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h archheaders \ + arm64-sysreg-defs \ + $(drm_ioctl_array) \ $(fadvise_advice_array) \ $(fsconfig_arrays) \ $(fsmount_arrays) \ @@ -1125,7 +1136,7 @@ endif # BUILD_BPF_SKEL bpf-skel-clean: $(call QUIET_CLEAN, bpf-skel) $(RM) -r $(SKEL_TMP_OUT) $(SKELETONS) -clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean +clean:: $(LIBAPI)-clean $(LIBBPF)-clean $(LIBSUBCMD)-clean $(LIBSYMBOL)-clean $(LIBPERF)-clean arm64-sysreg-defs-clean fixdep-clean python-clean bpf-skel-clean tests-coresight-targets-clean $(call QUIET_CLEAN, core-objs) $(RM) $(LIBPERF_A) $(OUTPUT)perf-archive $(OUTPUT)perf-iostat $(LANG_BINDINGS) $(Q)find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete $(Q)$(RM) $(OUTPUT).config-detected diff --git a/tools/perf/util/Build b/tools/perf/util/Build index 6d657c9927f7..2f76230958ad 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -345,7 +345,7 @@ CFLAGS_rbtree.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ET CFLAGS_libstring.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_hweight.o += -Wno-unused-parameter -DETC_PERFCONFIG="BUILD_STR($(ETC_PERFCONFIG_SQ))" CFLAGS_header.o += -include $(OUTPUT)PERF-VERSION-FILE -CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/ +CFLAGS_arm-spe.o += -I$(srctree)/tools/arch/arm64/include/ -I$(srctree)/tools/arch/arm64/include/generated/ $(OUTPUT)util/argv_split.o: ../lib/argv_split.c FORCE $(call rule_mkdir) -- cgit v1.2.3 From 9697d84cc3b6d9bff4b1fbffc10a4bb1398af9ba Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 11 Oct 2023 19:57:38 +0000 Subject: KVM: selftests: Generate sysreg-defs.h and add to include path Start generating sysreg-defs.h for arm64 builds in anticipation of updating sysreg.h to a version that depends on it. 
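To illustrate what the generated include path buys a selftest once this wiring is in place, a test can lean directly on sysreg-defs.h names rather than local copies. A minimal sketch (field names assumed to follow the generator's scheme, mirroring the processor.c conversion later in the series):

	/* sketch: read ID_AA64MMFR0_EL1 from a vCPU and decode 4K granule support */
	uint64_t val;

	vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1), &val);

	/* TGRAN4 == 0xf means the 4K granule is not implemented */
	bool ps4k = FIELD_GET(ID_AA64MMFR0_EL1_TGRAN4_MASK, val) != 0xf;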
Reviewed-by: Mark Brown Reviewed-by: Eric Auger Tested-by: Eric Auger Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-4-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- tools/testing/selftests/kvm/Makefile | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index a3bb36fb3cfc..07b3f4dc1a77 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -17,6 +17,17 @@ else ARCH_DIR := $(ARCH) endif +ifeq ($(ARCH),arm64) +arm64_tools_dir := $(top_srcdir)/tools/arch/arm64/tools/ +GEN_HDRS := $(top_srcdir)/tools/arch/arm64/include/generated/ +CFLAGS += -I$(GEN_HDRS) + +prepare: + $(MAKE) -C $(arm64_tools_dir) +else +prepare: +endif + LIBKVM += lib/assert.c LIBKVM += lib/elf.c LIBKVM += lib/guest_modes.c @@ -256,13 +267,18 @@ $(TEST_GEN_OBJ): $(OUTPUT)/%.o: %.c $(SPLIT_TESTS_TARGETS): %: %.o $(SPLIT_TESTS_OBJS) $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) $(TARGET_ARCH) $^ $(LDLIBS) -o $@ -EXTRA_CLEAN += $(LIBKVM_OBJS) $(TEST_DEP_FILES) $(TEST_GEN_OBJ) $(SPLIT_TESTS_OBJS) cscope.* +EXTRA_CLEAN += $(GEN_HDRS) \ + $(LIBKVM_OBJS) \ + $(SPLIT_TESTS_OBJS) \ + $(TEST_DEP_FILES) \ + $(TEST_GEN_OBJ) \ + cscope.* x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) -$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c +$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c prepare $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ -$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S +$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S prepare $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ # Compile the string overrides as freestanding to prevent the compiler from @@ -274,6 +290,7 @@ $(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS)))) $(TEST_GEN_PROGS): $(LIBKVM_OBJS) $(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS) +$(TEST_GEN_OBJ): prepare cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib .. cscope: -- cgit v1.2.3 From 0359c946b13153bd57fac65f4f3600ba5673e3de Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 11 Oct 2023 19:57:39 +0000 Subject: tools headers arm64: Update sysreg.h with kernel sources The users of sysreg.h (perf, KVM selftests) are now generating the necessary sysreg-defs.h; sync sysreg.h with the kernel sources and fix the KVM selftests that use macros which suffered a rename. 
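For readers skimming the large sysreg.h delta, the selftest fixups below all follow one mechanical pattern. A representative before/after, taken from the aarch32_id_regs.c hunk in this patch:

	/* old: hand-written macro names */
	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), val);

	/*
	 * new: generated names gain an _EL1 register infix and keep the
	 * architectural mixed-case field spelling (e.g. BRPs, DebugVer)
	 */
	el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);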
Signed-off-by: Jing Zhang Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-5-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- tools/arch/arm64/include/asm/gpr-num.h | 26 + tools/arch/arm64/include/asm/sysreg.h | 839 +++++---------------- .../selftests/kvm/aarch64/aarch32_id_regs.c | 4 +- .../selftests/kvm/aarch64/debug-exceptions.c | 12 +- .../selftests/kvm/aarch64/page_fault_test.c | 6 +- .../testing/selftests/kvm/lib/aarch64/processor.c | 6 +- 6 files changed, 232 insertions(+), 661 deletions(-) create mode 100644 tools/arch/arm64/include/asm/gpr-num.h diff --git a/tools/arch/arm64/include/asm/gpr-num.h b/tools/arch/arm64/include/asm/gpr-num.h new file mode 100644 index 000000000000..05da4a7c5788 --- /dev/null +++ b/tools/arch/arm64/include/asm/gpr-num.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_GPR_NUM_H +#define __ASM_GPR_NUM_H + +#ifdef __ASSEMBLY__ + + .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 + .equ .L__gpr_num_x\num, \num + .equ .L__gpr_num_w\num, \num + .endr + .equ .L__gpr_num_xzr, 31 + .equ .L__gpr_num_wzr, 31 + +#else /* __ASSEMBLY__ */ + +#define __DEFINE_ASM_GPR_NUMS \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ +" .equ .L__gpr_num_x\\num, \\num\n" \ +" .equ .L__gpr_num_w\\num, \\num\n" \ +" .endr\n" \ +" .equ .L__gpr_num_xzr, 31\n" \ +" .equ .L__gpr_num_wzr, 31\n" + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_GPR_NUM_H */ diff --git a/tools/arch/arm64/include/asm/sysreg.h b/tools/arch/arm64/include/asm/sysreg.h index 7640fa27be94..ccc13e991376 100644 --- a/tools/arch/arm64/include/asm/sysreg.h +++ b/tools/arch/arm64/include/asm/sysreg.h @@ -12,6 +12,8 @@ #include #include +#include + /* * ARMv8 ARM reserves the following encoding for system registers: * (Ref: ARMv8 ARM, Section: "System instruction class encoding overview", @@ -87,20 +89,24 @@ */ #define pstate_field(op1, op2) ((op1) << Op1_shift | (op2) << Op2_shift) #define PSTATE_Imm_shift CRm_shift +#define SET_PSTATE(x, r) __emit_inst(0xd500401f | PSTATE_ ## r | ((!!x) << PSTATE_Imm_shift)) #define PSTATE_PAN pstate_field(0, 4) #define PSTATE_UAO pstate_field(0, 3) #define PSTATE_SSBS pstate_field(3, 1) +#define PSTATE_DIT pstate_field(3, 2) #define PSTATE_TCO pstate_field(3, 4) -#define SET_PSTATE_PAN(x) __emit_inst(0xd500401f | PSTATE_PAN | ((!!x) << PSTATE_Imm_shift)) -#define SET_PSTATE_UAO(x) __emit_inst(0xd500401f | PSTATE_UAO | ((!!x) << PSTATE_Imm_shift)) -#define SET_PSTATE_SSBS(x) __emit_inst(0xd500401f | PSTATE_SSBS | ((!!x) << PSTATE_Imm_shift)) -#define SET_PSTATE_TCO(x) __emit_inst(0xd500401f | PSTATE_TCO | ((!!x) << PSTATE_Imm_shift)) +#define SET_PSTATE_PAN(x) SET_PSTATE((x), PAN) +#define SET_PSTATE_UAO(x) SET_PSTATE((x), UAO) +#define SET_PSTATE_SSBS(x) SET_PSTATE((x), SSBS) +#define SET_PSTATE_DIT(x) SET_PSTATE((x), DIT) +#define SET_PSTATE_TCO(x) SET_PSTATE((x), TCO) #define set_pstate_pan(x) asm volatile(SET_PSTATE_PAN(x)) #define set_pstate_uao(x) asm volatile(SET_PSTATE_UAO(x)) #define set_pstate_ssbs(x) asm volatile(SET_PSTATE_SSBS(x)) +#define set_pstate_dit(x) asm volatile(SET_PSTATE_DIT(x)) #define __SYS_BARRIER_INSN(CRm, op2, Rt) \ __emit_inst(0xd5000000 | sys_insn(0, 3, 3, (CRm), (op2)) | ((Rt) & 0x1f)) @@ -108,25 +114,43 @@ #define SB_BARRIER_INSN __SYS_BARRIER_INSN(0, 7, 31) #define SYS_DC_ISW sys_insn(1, 0, 7, 6, 2) +#define SYS_DC_IGSW sys_insn(1, 0, 7, 6, 4) +#define SYS_DC_IGDSW sys_insn(1, 0, 7, 6, 6) #define 
SYS_DC_CSW sys_insn(1, 0, 7, 10, 2) +#define SYS_DC_CGSW sys_insn(1, 0, 7, 10, 4) +#define SYS_DC_CGDSW sys_insn(1, 0, 7, 10, 6) #define SYS_DC_CISW sys_insn(1, 0, 7, 14, 2) +#define SYS_DC_CIGSW sys_insn(1, 0, 7, 14, 4) +#define SYS_DC_CIGDSW sys_insn(1, 0, 7, 14, 6) + +/* + * Automatically generated definitions for system registers, the + * manual encodings below are in the process of being converted to + * come from here. The header relies on the definition of sys_reg() + * earlier in this file. + */ +#include "asm/sysreg-defs.h" /* * System registers, organised loosely by encoding but grouped together * where the architected name contains an index. e.g. ID_MMFR_EL1. */ -#define SYS_OSDTRRX_EL1 sys_reg(2, 0, 0, 0, 2) -#define SYS_MDCCINT_EL1 sys_reg(2, 0, 0, 2, 0) -#define SYS_MDSCR_EL1 sys_reg(2, 0, 0, 2, 2) -#define SYS_OSDTRTX_EL1 sys_reg(2, 0, 0, 3, 2) -#define SYS_OSECCR_EL1 sys_reg(2, 0, 0, 6, 2) +#define SYS_SVCR_SMSTOP_SM_EL0 sys_reg(0, 3, 4, 2, 3) +#define SYS_SVCR_SMSTART_SM_EL0 sys_reg(0, 3, 4, 3, 3) +#define SYS_SVCR_SMSTOP_SMZA_EL0 sys_reg(0, 3, 4, 6, 3) + #define SYS_DBGBVRn_EL1(n) sys_reg(2, 0, 0, n, 4) #define SYS_DBGBCRn_EL1(n) sys_reg(2, 0, 0, n, 5) #define SYS_DBGWVRn_EL1(n) sys_reg(2, 0, 0, n, 6) #define SYS_DBGWCRn_EL1(n) sys_reg(2, 0, 0, n, 7) #define SYS_MDRAR_EL1 sys_reg(2, 0, 1, 0, 0) -#define SYS_OSLAR_EL1 sys_reg(2, 0, 1, 0, 4) + #define SYS_OSLSR_EL1 sys_reg(2, 0, 1, 1, 4) +#define OSLSR_EL1_OSLM_MASK (BIT(3) | BIT(0)) +#define OSLSR_EL1_OSLM_NI 0 +#define OSLSR_EL1_OSLM_IMPLEMENTED BIT(3) +#define OSLSR_EL1_OSLK BIT(1) + #define SYS_OSDLR_EL1 sys_reg(2, 0, 1, 3, 4) #define SYS_DBGPRCR_EL1 sys_reg(2, 0, 1, 4, 4) #define SYS_DBGCLAIMSET_EL1 sys_reg(2, 0, 7, 8, 6) @@ -142,59 +166,12 @@ #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) -#define SYS_ID_PFR0_EL1 sys_reg(3, 0, 0, 1, 0) -#define SYS_ID_PFR1_EL1 sys_reg(3, 0, 0, 1, 1) -#define SYS_ID_PFR2_EL1 sys_reg(3, 0, 0, 3, 4) -#define SYS_ID_DFR0_EL1 sys_reg(3, 0, 0, 1, 2) -#define SYS_ID_DFR1_EL1 sys_reg(3, 0, 0, 3, 5) -#define SYS_ID_AFR0_EL1 sys_reg(3, 0, 0, 1, 3) -#define SYS_ID_MMFR0_EL1 sys_reg(3, 0, 0, 1, 4) -#define SYS_ID_MMFR1_EL1 sys_reg(3, 0, 0, 1, 5) -#define SYS_ID_MMFR2_EL1 sys_reg(3, 0, 0, 1, 6) -#define SYS_ID_MMFR3_EL1 sys_reg(3, 0, 0, 1, 7) -#define SYS_ID_MMFR4_EL1 sys_reg(3, 0, 0, 2, 6) -#define SYS_ID_MMFR5_EL1 sys_reg(3, 0, 0, 3, 6) - -#define SYS_ID_ISAR0_EL1 sys_reg(3, 0, 0, 2, 0) -#define SYS_ID_ISAR1_EL1 sys_reg(3, 0, 0, 2, 1) -#define SYS_ID_ISAR2_EL1 sys_reg(3, 0, 0, 2, 2) -#define SYS_ID_ISAR3_EL1 sys_reg(3, 0, 0, 2, 3) -#define SYS_ID_ISAR4_EL1 sys_reg(3, 0, 0, 2, 4) -#define SYS_ID_ISAR5_EL1 sys_reg(3, 0, 0, 2, 5) -#define SYS_ID_ISAR6_EL1 sys_reg(3, 0, 0, 2, 7) - -#define SYS_MVFR0_EL1 sys_reg(3, 0, 0, 3, 0) -#define SYS_MVFR1_EL1 sys_reg(3, 0, 0, 3, 1) -#define SYS_MVFR2_EL1 sys_reg(3, 0, 0, 3, 2) - -#define SYS_ID_AA64PFR0_EL1 sys_reg(3, 0, 0, 4, 0) -#define SYS_ID_AA64PFR1_EL1 sys_reg(3, 0, 0, 4, 1) -#define SYS_ID_AA64ZFR0_EL1 sys_reg(3, 0, 0, 4, 4) - -#define SYS_ID_AA64DFR0_EL1 sys_reg(3, 0, 0, 5, 0) -#define SYS_ID_AA64DFR1_EL1 sys_reg(3, 0, 0, 5, 1) - -#define SYS_ID_AA64AFR0_EL1 sys_reg(3, 0, 0, 5, 4) -#define SYS_ID_AA64AFR1_EL1 sys_reg(3, 0, 0, 5, 5) - -#define SYS_ID_AA64ISAR0_EL1 sys_reg(3, 0, 0, 6, 0) -#define SYS_ID_AA64ISAR1_EL1 sys_reg(3, 0, 0, 6, 1) - -#define SYS_ID_AA64MMFR0_EL1 sys_reg(3, 0, 0, 7, 0) -#define SYS_ID_AA64MMFR1_EL1 sys_reg(3, 0, 0, 7, 1) -#define SYS_ID_AA64MMFR2_EL1 sys_reg(3, 0, 0, 7, 2) - -#define 
SYS_SCTLR_EL1 sys_reg(3, 0, 1, 0, 0) #define SYS_ACTLR_EL1 sys_reg(3, 0, 1, 0, 1) -#define SYS_CPACR_EL1 sys_reg(3, 0, 1, 0, 2) #define SYS_RGSR_EL1 sys_reg(3, 0, 1, 0, 5) #define SYS_GCR_EL1 sys_reg(3, 0, 1, 0, 6) -#define SYS_ZCR_EL1 sys_reg(3, 0, 1, 2, 0) #define SYS_TRFCR_EL1 sys_reg(3, 0, 1, 2, 1) -#define SYS_TTBR0_EL1 sys_reg(3, 0, 2, 0, 0) -#define SYS_TTBR1_EL1 sys_reg(3, 0, 2, 0, 1) #define SYS_TCR_EL1 sys_reg(3, 0, 2, 0, 2) #define SYS_APIAKEYLO_EL1 sys_reg(3, 0, 2, 1, 0) @@ -230,159 +207,33 @@ #define SYS_TFSR_EL1 sys_reg(3, 0, 5, 6, 0) #define SYS_TFSRE0_EL1 sys_reg(3, 0, 5, 6, 1) -#define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) #define SYS_PAR_EL1_F BIT(0) #define SYS_PAR_EL1_FST GENMASK(6, 1) /*** Statistical Profiling Extension ***/ -/* ID registers */ -#define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) -#define SYS_PMSIDR_EL1_FE_SHIFT 0 -#define SYS_PMSIDR_EL1_FT_SHIFT 1 -#define SYS_PMSIDR_EL1_FL_SHIFT 2 -#define SYS_PMSIDR_EL1_ARCHINST_SHIFT 3 -#define SYS_PMSIDR_EL1_LDS_SHIFT 4 -#define SYS_PMSIDR_EL1_ERND_SHIFT 5 -#define SYS_PMSIDR_EL1_INTERVAL_SHIFT 8 -#define SYS_PMSIDR_EL1_INTERVAL_MASK 0xfUL -#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT 12 -#define SYS_PMSIDR_EL1_MAXSIZE_MASK 0xfUL -#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT 16 -#define SYS_PMSIDR_EL1_COUNTSIZE_MASK 0xfUL - -#define SYS_PMBIDR_EL1 sys_reg(3, 0, 9, 10, 7) -#define SYS_PMBIDR_EL1_ALIGN_SHIFT 0 -#define SYS_PMBIDR_EL1_ALIGN_MASK 0xfU -#define SYS_PMBIDR_EL1_P_SHIFT 4 -#define SYS_PMBIDR_EL1_F_SHIFT 5 - -/* Sampling controls */ -#define SYS_PMSCR_EL1 sys_reg(3, 0, 9, 9, 0) -#define SYS_PMSCR_EL1_E0SPE_SHIFT 0 -#define SYS_PMSCR_EL1_E1SPE_SHIFT 1 -#define SYS_PMSCR_EL1_CX_SHIFT 3 -#define SYS_PMSCR_EL1_PA_SHIFT 4 -#define SYS_PMSCR_EL1_TS_SHIFT 5 -#define SYS_PMSCR_EL1_PCT_SHIFT 6 - -#define SYS_PMSCR_EL2 sys_reg(3, 4, 9, 9, 0) -#define SYS_PMSCR_EL2_E0HSPE_SHIFT 0 -#define SYS_PMSCR_EL2_E2SPE_SHIFT 1 -#define SYS_PMSCR_EL2_CX_SHIFT 3 -#define SYS_PMSCR_EL2_PA_SHIFT 4 -#define SYS_PMSCR_EL2_TS_SHIFT 5 -#define SYS_PMSCR_EL2_PCT_SHIFT 6 - -#define SYS_PMSICR_EL1 sys_reg(3, 0, 9, 9, 2) - -#define SYS_PMSIRR_EL1 sys_reg(3, 0, 9, 9, 3) -#define SYS_PMSIRR_EL1_RND_SHIFT 0 -#define SYS_PMSIRR_EL1_INTERVAL_SHIFT 8 -#define SYS_PMSIRR_EL1_INTERVAL_MASK 0xffffffUL - -/* Filtering controls */ -#define SYS_PMSNEVFR_EL1 sys_reg(3, 0, 9, 9, 1) - -#define SYS_PMSFCR_EL1 sys_reg(3, 0, 9, 9, 4) -#define SYS_PMSFCR_EL1_FE_SHIFT 0 -#define SYS_PMSFCR_EL1_FT_SHIFT 1 -#define SYS_PMSFCR_EL1_FL_SHIFT 2 -#define SYS_PMSFCR_EL1_B_SHIFT 16 -#define SYS_PMSFCR_EL1_LD_SHIFT 17 -#define SYS_PMSFCR_EL1_ST_SHIFT 18 - -#define SYS_PMSEVFR_EL1 sys_reg(3, 0, 9, 9, 5) -#define SYS_PMSEVFR_EL1_RES0_8_2 \ +#define PMSEVFR_EL1_RES0_IMP \ (GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\ BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0)) -#define SYS_PMSEVFR_EL1_RES0_8_3 \ - (SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) - -#define SYS_PMSLATFR_EL1 sys_reg(3, 0, 9, 9, 6) -#define SYS_PMSLATFR_EL1_MINLAT_SHIFT 0 - -/* Buffer controls */ -#define SYS_PMBLIMITR_EL1 sys_reg(3, 0, 9, 10, 0) -#define SYS_PMBLIMITR_EL1_E_SHIFT 0 -#define SYS_PMBLIMITR_EL1_FM_SHIFT 1 -#define SYS_PMBLIMITR_EL1_FM_MASK 0x3UL -#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ (0 << SYS_PMBLIMITR_EL1_FM_SHIFT) - -#define SYS_PMBPTR_EL1 sys_reg(3, 0, 9, 10, 1) +#define PMSEVFR_EL1_RES0_V1P1 \ + (PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11))) +#define PMSEVFR_EL1_RES0_V1P2 \ + 
(PMSEVFR_EL1_RES0_V1P1 & ~BIT_ULL(6)) /* Buffer error reporting */ -#define SYS_PMBSR_EL1 sys_reg(3, 0, 9, 10, 3) -#define SYS_PMBSR_EL1_COLL_SHIFT 16 -#define SYS_PMBSR_EL1_S_SHIFT 17 -#define SYS_PMBSR_EL1_EA_SHIFT 18 -#define SYS_PMBSR_EL1_DL_SHIFT 19 -#define SYS_PMBSR_EL1_EC_SHIFT 26 -#define SYS_PMBSR_EL1_EC_MASK 0x3fUL - -#define SYS_PMBSR_EL1_EC_BUF (0x0UL << SYS_PMBSR_EL1_EC_SHIFT) -#define SYS_PMBSR_EL1_EC_FAULT_S1 (0x24UL << SYS_PMBSR_EL1_EC_SHIFT) -#define SYS_PMBSR_EL1_EC_FAULT_S2 (0x25UL << SYS_PMBSR_EL1_EC_SHIFT) - -#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT 0 -#define SYS_PMBSR_EL1_FAULT_FSC_MASK 0x3fUL +#define PMBSR_EL1_FAULT_FSC_SHIFT PMBSR_EL1_MSS_SHIFT +#define PMBSR_EL1_FAULT_FSC_MASK PMBSR_EL1_MSS_MASK -#define SYS_PMBSR_EL1_BUF_BSC_SHIFT 0 -#define SYS_PMBSR_EL1_BUF_BSC_MASK 0x3fUL +#define PMBSR_EL1_BUF_BSC_SHIFT PMBSR_EL1_MSS_SHIFT +#define PMBSR_EL1_BUF_BSC_MASK PMBSR_EL1_MSS_MASK -#define SYS_PMBSR_EL1_BUF_BSC_FULL (0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT) +#define PMBSR_EL1_BUF_BSC_FULL 0x1UL /*** End of Statistical Profiling Extension ***/ -/* - * TRBE Registers - */ -#define SYS_TRBLIMITR_EL1 sys_reg(3, 0, 9, 11, 0) -#define SYS_TRBPTR_EL1 sys_reg(3, 0, 9, 11, 1) -#define SYS_TRBBASER_EL1 sys_reg(3, 0, 9, 11, 2) -#define SYS_TRBSR_EL1 sys_reg(3, 0, 9, 11, 3) -#define SYS_TRBMAR_EL1 sys_reg(3, 0, 9, 11, 4) -#define SYS_TRBTRG_EL1 sys_reg(3, 0, 9, 11, 6) -#define SYS_TRBIDR_EL1 sys_reg(3, 0, 9, 11, 7) - -#define TRBLIMITR_LIMIT_MASK GENMASK_ULL(51, 0) -#define TRBLIMITR_LIMIT_SHIFT 12 -#define TRBLIMITR_NVM BIT(5) -#define TRBLIMITR_TRIG_MODE_MASK GENMASK(1, 0) -#define TRBLIMITR_TRIG_MODE_SHIFT 3 -#define TRBLIMITR_FILL_MODE_MASK GENMASK(1, 0) -#define TRBLIMITR_FILL_MODE_SHIFT 1 -#define TRBLIMITR_ENABLE BIT(0) -#define TRBPTR_PTR_MASK GENMASK_ULL(63, 0) -#define TRBPTR_PTR_SHIFT 0 -#define TRBBASER_BASE_MASK GENMASK_ULL(51, 0) -#define TRBBASER_BASE_SHIFT 12 -#define TRBSR_EC_MASK GENMASK(5, 0) -#define TRBSR_EC_SHIFT 26 -#define TRBSR_IRQ BIT(22) -#define TRBSR_TRG BIT(21) -#define TRBSR_WRAP BIT(20) -#define TRBSR_ABORT BIT(18) -#define TRBSR_STOP BIT(17) -#define TRBSR_MSS_MASK GENMASK(15, 0) -#define TRBSR_MSS_SHIFT 0 -#define TRBSR_BSC_MASK GENMASK(5, 0) -#define TRBSR_BSC_SHIFT 0 -#define TRBSR_FSC_MASK GENMASK(5, 0) -#define TRBSR_FSC_SHIFT 0 -#define TRBMAR_SHARE_MASK GENMASK(1, 0) -#define TRBMAR_SHARE_SHIFT 8 -#define TRBMAR_OUTER_MASK GENMASK(3, 0) -#define TRBMAR_OUTER_SHIFT 4 -#define TRBMAR_INNER_MASK GENMASK(3, 0) -#define TRBMAR_INNER_SHIFT 0 -#define TRBTRG_TRG_MASK GENMASK(31, 0) -#define TRBTRG_TRG_SHIFT 0 -#define TRBIDR_FLAG BIT(5) -#define TRBIDR_PROG BIT(4) -#define TRBIDR_ALIGN_MASK GENMASK(3, 0) -#define TRBIDR_ALIGN_SHIFT 0 +#define TRBSR_EL1_BSC_MASK GENMASK(5, 0) +#define TRBSR_EL1_BSC_SHIFT 0 #define SYS_PMINTENSET_EL1 sys_reg(3, 0, 9, 14, 1) #define SYS_PMINTENCLR_EL1 sys_reg(3, 0, 9, 14, 2) @@ -392,12 +243,6 @@ #define SYS_MAIR_EL1 sys_reg(3, 0, 10, 2, 0) #define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0) -#define SYS_LORSA_EL1 sys_reg(3, 0, 10, 4, 0) -#define SYS_LOREA_EL1 sys_reg(3, 0, 10, 4, 1) -#define SYS_LORN_EL1 sys_reg(3, 0, 10, 4, 2) -#define SYS_LORC_EL1 sys_reg(3, 0, 10, 4, 3) -#define SYS_LORID_EL1 sys_reg(3, 0, 10, 4, 7) - #define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0) #define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1) @@ -429,23 +274,10 @@ #define SYS_ICC_IGRPEN0_EL1 sys_reg(3, 0, 12, 12, 6) #define SYS_ICC_IGRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) -#define SYS_CONTEXTIDR_EL1 sys_reg(3, 0, 13, 0, 1) -#define SYS_TPIDR_EL1 sys_reg(3, 0, 13, 
0, 4) - -#define SYS_SCXTNUM_EL1 sys_reg(3, 0, 13, 0, 7) - #define SYS_CNTKCTL_EL1 sys_reg(3, 0, 14, 1, 0) -#define SYS_CCSIDR_EL1 sys_reg(3, 1, 0, 0, 0) -#define SYS_CLIDR_EL1 sys_reg(3, 1, 0, 0, 1) -#define SYS_GMID_EL1 sys_reg(3, 1, 0, 0, 4) #define SYS_AIDR_EL1 sys_reg(3, 1, 0, 0, 7) -#define SYS_CSSELR_EL1 sys_reg(3, 2, 0, 0, 0) - -#define SYS_CTR_EL0 sys_reg(3, 3, 0, 0, 1) -#define SYS_DCZID_EL0 sys_reg(3, 3, 0, 0, 7) - #define SYS_RNDR_EL0 sys_reg(3, 3, 2, 4, 0) #define SYS_RNDRRS_EL0 sys_reg(3, 3, 2, 4, 1) @@ -465,6 +297,7 @@ #define SYS_TPIDR_EL0 sys_reg(3, 3, 13, 0, 2) #define SYS_TPIDRRO_EL0 sys_reg(3, 3, 13, 0, 3) +#define SYS_TPIDR2_EL0 sys_reg(3, 3, 13, 0, 5) #define SYS_SCXTNUM_EL0 sys_reg(3, 3, 13, 0, 7) @@ -506,6 +339,10 @@ #define SYS_CNTFRQ_EL0 sys_reg(3, 3, 14, 0, 0) +#define SYS_CNTPCT_EL0 sys_reg(3, 3, 14, 0, 1) +#define SYS_CNTPCTSS_EL0 sys_reg(3, 3, 14, 0, 5) +#define SYS_CNTVCTSS_EL0 sys_reg(3, 3, 14, 0, 6) + #define SYS_CNTP_TVAL_EL0 sys_reg(3, 3, 14, 2, 0) #define SYS_CNTP_CTL_EL0 sys_reg(3, 3, 14, 2, 1) #define SYS_CNTP_CVAL_EL0 sys_reg(3, 3, 14, 2, 2) @@ -515,7 +352,9 @@ #define SYS_AARCH32_CNTP_TVAL sys_reg(0, 0, 14, 2, 0) #define SYS_AARCH32_CNTP_CTL sys_reg(0, 0, 14, 2, 1) +#define SYS_AARCH32_CNTPCT sys_reg(0, 0, 0, 14, 0) #define SYS_AARCH32_CNTP_CVAL sys_reg(0, 2, 0, 14, 0) +#define SYS_AARCH32_CNTPCTSS sys_reg(0, 8, 0, 14, 0) #define __PMEV_op2(n) ((n) & 0x7) #define __CNTR_CRm(n) (0x8 | (((n) >> 3) & 0x3)) @@ -525,26 +364,48 @@ #define SYS_PMCCFILTR_EL0 sys_reg(3, 3, 14, 15, 7) +#define SYS_VPIDR_EL2 sys_reg(3, 4, 0, 0, 0) +#define SYS_VMPIDR_EL2 sys_reg(3, 4, 0, 0, 5) + #define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0) -#define SYS_HFGRTR_EL2 sys_reg(3, 4, 1, 1, 4) -#define SYS_HFGWTR_EL2 sys_reg(3, 4, 1, 1, 5) -#define SYS_HFGITR_EL2 sys_reg(3, 4, 1, 1, 6) -#define SYS_ZCR_EL2 sys_reg(3, 4, 1, 2, 0) +#define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1) +#define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0) +#define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1) +#define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2) +#define SYS_HSTR_EL2 sys_reg(3, 4, 1, 1, 3) +#define SYS_HACR_EL2 sys_reg(3, 4, 1, 1, 7) + +#define SYS_TTBR0_EL2 sys_reg(3, 4, 2, 0, 0) +#define SYS_TTBR1_EL2 sys_reg(3, 4, 2, 0, 1) +#define SYS_TCR_EL2 sys_reg(3, 4, 2, 0, 2) +#define SYS_VTTBR_EL2 sys_reg(3, 4, 2, 1, 0) +#define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) + #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) -#define SYS_DACR32_EL2 sys_reg(3, 4, 3, 0, 0) #define SYS_HDFGRTR_EL2 sys_reg(3, 4, 3, 1, 4) #define SYS_HDFGWTR_EL2 sys_reg(3, 4, 3, 1, 5) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) +#define SYS_SP_EL1 sys_reg(3, 4, 4, 1, 0) #define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) +#define SYS_AFSR0_EL2 sys_reg(3, 4, 5, 1, 0) +#define SYS_AFSR1_EL2 sys_reg(3, 4, 5, 1, 1) #define SYS_ESR_EL2 sys_reg(3, 4, 5, 2, 0) #define SYS_VSESR_EL2 sys_reg(3, 4, 5, 2, 3) #define SYS_FPEXC32_EL2 sys_reg(3, 4, 5, 3, 0) #define SYS_TFSR_EL2 sys_reg(3, 4, 5, 6, 0) + #define SYS_FAR_EL2 sys_reg(3, 4, 6, 0, 0) +#define SYS_HPFAR_EL2 sys_reg(3, 4, 6, 0, 4) + +#define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0) +#define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0) -#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) +#define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0) +#define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1) +#define SYS_RMR_EL2 sys_reg(3, 4, 12, 0, 2) +#define SYS_VDISR_EL2 sys_reg(3, 4, 12, 1, 1) #define __SYS__AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) #define SYS_ICH_AP0R0_EL2 
__SYS__AP0Rx_EL2(0) #define SYS_ICH_AP0R1_EL2 __SYS__AP0Rx_EL2(1) @@ -586,10 +447,14 @@ #define SYS_ICH_LR14_EL2 __SYS__LR8_EL2(6) #define SYS_ICH_LR15_EL2 __SYS__LR8_EL2(7) +#define SYS_CONTEXTIDR_EL2 sys_reg(3, 4, 13, 0, 1) +#define SYS_TPIDR_EL2 sys_reg(3, 4, 13, 0, 2) + +#define SYS_CNTVOFF_EL2 sys_reg(3, 4, 14, 0, 3) +#define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0) + /* VHE encodings for architectural EL0/1 system registers */ #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) -#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) -#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) @@ -599,11 +464,9 @@ #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) -#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) -#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) #define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) @@ -612,37 +475,41 @@ #define SYS_CNTV_CTL_EL02 sys_reg(3, 5, 14, 3, 1) #define SYS_CNTV_CVAL_EL02 sys_reg(3, 5, 14, 3, 2) +#define SYS_SP_EL2 sys_reg(3, 6, 4, 1, 0) + /* Common SCTLR_ELx flags. */ +#define SCTLR_ELx_ENTP2 (BIT(60)) #define SCTLR_ELx_DSSBS (BIT(44)) #define SCTLR_ELx_ATA (BIT(43)) -#define SCTLR_ELx_TCF_SHIFT 40 -#define SCTLR_ELx_TCF_NONE (UL(0x0) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_SYNC (UL(0x1) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_ASYNC (UL(0x2) << SCTLR_ELx_TCF_SHIFT) -#define SCTLR_ELx_TCF_MASK (UL(0x3) << SCTLR_ELx_TCF_SHIFT) - +#define SCTLR_ELx_EE_SHIFT 25 #define SCTLR_ELx_ENIA_SHIFT 31 -#define SCTLR_ELx_ITFSB (BIT(37)) -#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT)) -#define SCTLR_ELx_ENIB (BIT(30)) -#define SCTLR_ELx_ENDA (BIT(27)) -#define SCTLR_ELx_EE (BIT(25)) -#define SCTLR_ELx_IESB (BIT(21)) -#define SCTLR_ELx_WXN (BIT(19)) -#define SCTLR_ELx_ENDB (BIT(13)) -#define SCTLR_ELx_I (BIT(12)) -#define SCTLR_ELx_SA (BIT(3)) -#define SCTLR_ELx_C (BIT(2)) -#define SCTLR_ELx_A (BIT(1)) -#define SCTLR_ELx_M (BIT(0)) +#define SCTLR_ELx_ITFSB (BIT(37)) +#define SCTLR_ELx_ENIA (BIT(SCTLR_ELx_ENIA_SHIFT)) +#define SCTLR_ELx_ENIB (BIT(30)) +#define SCTLR_ELx_LSMAOE (BIT(29)) +#define SCTLR_ELx_nTLSMD (BIT(28)) +#define SCTLR_ELx_ENDA (BIT(27)) +#define SCTLR_ELx_EE (BIT(SCTLR_ELx_EE_SHIFT)) +#define SCTLR_ELx_EIS (BIT(22)) +#define SCTLR_ELx_IESB (BIT(21)) +#define SCTLR_ELx_TSCXT (BIT(20)) +#define SCTLR_ELx_WXN (BIT(19)) +#define SCTLR_ELx_ENDB (BIT(13)) +#define SCTLR_ELx_I (BIT(12)) +#define SCTLR_ELx_EOS (BIT(11)) +#define SCTLR_ELx_SA (BIT(3)) +#define SCTLR_ELx_C (BIT(2)) +#define SCTLR_ELx_A (BIT(1)) +#define SCTLR_ELx_M (BIT(0)) /* SCTLR_EL2 specific flags. */ #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ (BIT(29))) +#define SCTLR_EL2_BT (BIT(36)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL2 SCTLR_ELx_EE #else @@ -658,33 +525,6 @@ (SCTLR_EL2_RES1 | ENDIAN_SET_EL2) /* SCTLR_EL1 specific flags. 
*/ -#define SCTLR_EL1_EPAN (BIT(57)) -#define SCTLR_EL1_ATA0 (BIT(42)) - -#define SCTLR_EL1_TCF0_SHIFT 38 -#define SCTLR_EL1_TCF0_NONE (UL(0x0) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_SYNC (UL(0x1) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_ASYNC (UL(0x2) << SCTLR_EL1_TCF0_SHIFT) -#define SCTLR_EL1_TCF0_MASK (UL(0x3) << SCTLR_EL1_TCF0_SHIFT) - -#define SCTLR_EL1_BT1 (BIT(36)) -#define SCTLR_EL1_BT0 (BIT(35)) -#define SCTLR_EL1_UCI (BIT(26)) -#define SCTLR_EL1_E0E (BIT(24)) -#define SCTLR_EL1_SPAN (BIT(23)) -#define SCTLR_EL1_NTWE (BIT(18)) -#define SCTLR_EL1_NTWI (BIT(16)) -#define SCTLR_EL1_UCT (BIT(15)) -#define SCTLR_EL1_DZE (BIT(14)) -#define SCTLR_EL1_UMA (BIT(9)) -#define SCTLR_EL1_SED (BIT(8)) -#define SCTLR_EL1_ITD (BIT(7)) -#define SCTLR_EL1_CP15BEN (BIT(5)) -#define SCTLR_EL1_SA0 (BIT(4)) - -#define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ - (BIT(29))) - #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) #else @@ -692,14 +532,17 @@ #endif #define INIT_SCTLR_EL1_MMU_OFF \ - (ENDIAN_SET_EL1 | SCTLR_EL1_RES1) + (ENDIAN_SET_EL1 | SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | \ + SCTLR_EL1_EIS | SCTLR_EL1_TSCXT | SCTLR_EL1_EOS) #define INIT_SCTLR_EL1_MMU_ON \ - (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \ - SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \ - SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ - SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \ - SCTLR_EL1_EPAN | SCTLR_EL1_RES1) + (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | \ + SCTLR_EL1_SA0 | SCTLR_EL1_SED | SCTLR_ELx_I | \ + SCTLR_EL1_DZE | SCTLR_EL1_UCT | SCTLR_EL1_nTWE | \ + SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \ + ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | \ + SCTLR_EL1_LSMAOE | SCTLR_EL1_nTLSMD | SCTLR_EL1_EIS | \ + SCTLR_EL1_TSCXT | SCTLR_EL1_EOS) /* MAIR_ELx memory attributes (used by Linux) */ #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00) @@ -712,387 +555,68 @@ /* Position the attr at the correct index */ #define MAIR_ATTRIDX(attr, idx) ((attr) << ((idx) * 8)) -/* id_aa64isar0 */ -#define ID_AA64ISAR0_RNDR_SHIFT 60 -#define ID_AA64ISAR0_TLB_SHIFT 56 -#define ID_AA64ISAR0_TS_SHIFT 52 -#define ID_AA64ISAR0_FHM_SHIFT 48 -#define ID_AA64ISAR0_DP_SHIFT 44 -#define ID_AA64ISAR0_SM4_SHIFT 40 -#define ID_AA64ISAR0_SM3_SHIFT 36 -#define ID_AA64ISAR0_SHA3_SHIFT 32 -#define ID_AA64ISAR0_RDM_SHIFT 28 -#define ID_AA64ISAR0_ATOMICS_SHIFT 20 -#define ID_AA64ISAR0_CRC32_SHIFT 16 -#define ID_AA64ISAR0_SHA2_SHIFT 12 -#define ID_AA64ISAR0_SHA1_SHIFT 8 -#define ID_AA64ISAR0_AES_SHIFT 4 - -#define ID_AA64ISAR0_TLB_RANGE_NI 0x0 -#define ID_AA64ISAR0_TLB_RANGE 0x2 - -/* id_aa64isar1 */ -#define ID_AA64ISAR1_I8MM_SHIFT 52 -#define ID_AA64ISAR1_DGH_SHIFT 48 -#define ID_AA64ISAR1_BF16_SHIFT 44 -#define ID_AA64ISAR1_SPECRES_SHIFT 40 -#define ID_AA64ISAR1_SB_SHIFT 36 -#define ID_AA64ISAR1_FRINTTS_SHIFT 32 -#define ID_AA64ISAR1_GPI_SHIFT 28 -#define ID_AA64ISAR1_GPA_SHIFT 24 -#define ID_AA64ISAR1_LRCPC_SHIFT 20 -#define ID_AA64ISAR1_FCMA_SHIFT 16 -#define ID_AA64ISAR1_JSCVT_SHIFT 12 -#define ID_AA64ISAR1_API_SHIFT 8 -#define ID_AA64ISAR1_APA_SHIFT 4 -#define ID_AA64ISAR1_DPB_SHIFT 0 - -#define ID_AA64ISAR1_APA_NI 0x0 -#define ID_AA64ISAR1_APA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_APA_ARCH_EPAC 0x2 -#define ID_AA64ISAR1_APA_ARCH_EPAC2 0x3 -#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC 0x4 -#define ID_AA64ISAR1_APA_ARCH_EPAC2_FPAC_CMB 0x5 -#define ID_AA64ISAR1_API_NI 0x0 -#define 
ID_AA64ISAR1_API_IMP_DEF 0x1 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC 0x2 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2 0x3 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC 0x4 -#define ID_AA64ISAR1_API_IMP_DEF_EPAC2_FPAC_CMB 0x5 -#define ID_AA64ISAR1_GPA_NI 0x0 -#define ID_AA64ISAR1_GPA_ARCHITECTED 0x1 -#define ID_AA64ISAR1_GPI_NI 0x0 -#define ID_AA64ISAR1_GPI_IMP_DEF 0x1 - /* id_aa64pfr0 */ -#define ID_AA64PFR0_CSV3_SHIFT 60 -#define ID_AA64PFR0_CSV2_SHIFT 56 -#define ID_AA64PFR0_DIT_SHIFT 48 -#define ID_AA64PFR0_AMU_SHIFT 44 -#define ID_AA64PFR0_MPAM_SHIFT 40 -#define ID_AA64PFR0_SEL2_SHIFT 36 -#define ID_AA64PFR0_SVE_SHIFT 32 -#define ID_AA64PFR0_RAS_SHIFT 28 -#define ID_AA64PFR0_GIC_SHIFT 24 -#define ID_AA64PFR0_ASIMD_SHIFT 20 -#define ID_AA64PFR0_FP_SHIFT 16 -#define ID_AA64PFR0_EL3_SHIFT 12 -#define ID_AA64PFR0_EL2_SHIFT 8 -#define ID_AA64PFR0_EL1_SHIFT 4 -#define ID_AA64PFR0_EL0_SHIFT 0 - -#define ID_AA64PFR0_AMU 0x1 -#define ID_AA64PFR0_SVE 0x1 -#define ID_AA64PFR0_RAS_V1 0x1 -#define ID_AA64PFR0_RAS_V1P1 0x2 -#define ID_AA64PFR0_FP_NI 0xf -#define ID_AA64PFR0_FP_SUPPORTED 0x0 -#define ID_AA64PFR0_ASIMD_NI 0xf -#define ID_AA64PFR0_ASIMD_SUPPORTED 0x0 -#define ID_AA64PFR0_ELx_64BIT_ONLY 0x1 -#define ID_AA64PFR0_ELx_32BIT_64BIT 0x2 - -/* id_aa64pfr1 */ -#define ID_AA64PFR1_MPAMFRAC_SHIFT 16 -#define ID_AA64PFR1_RASFRAC_SHIFT 12 -#define ID_AA64PFR1_MTE_SHIFT 8 -#define ID_AA64PFR1_SSBS_SHIFT 4 -#define ID_AA64PFR1_BT_SHIFT 0 - -#define ID_AA64PFR1_SSBS_PSTATE_NI 0 -#define ID_AA64PFR1_SSBS_PSTATE_ONLY 1 -#define ID_AA64PFR1_SSBS_PSTATE_INSNS 2 -#define ID_AA64PFR1_BT_BTI 0x1 - -#define ID_AA64PFR1_MTE_NI 0x0 -#define ID_AA64PFR1_MTE_EL0 0x1 -#define ID_AA64PFR1_MTE 0x2 - -/* id_aa64zfr0 */ -#define ID_AA64ZFR0_F64MM_SHIFT 56 -#define ID_AA64ZFR0_F32MM_SHIFT 52 -#define ID_AA64ZFR0_I8MM_SHIFT 44 -#define ID_AA64ZFR0_SM4_SHIFT 40 -#define ID_AA64ZFR0_SHA3_SHIFT 32 -#define ID_AA64ZFR0_BF16_SHIFT 20 -#define ID_AA64ZFR0_BITPERM_SHIFT 16 -#define ID_AA64ZFR0_AES_SHIFT 4 -#define ID_AA64ZFR0_SVEVER_SHIFT 0 - -#define ID_AA64ZFR0_F64MM 0x1 -#define ID_AA64ZFR0_F32MM 0x1 -#define ID_AA64ZFR0_I8MM 0x1 -#define ID_AA64ZFR0_BF16 0x1 -#define ID_AA64ZFR0_SM4 0x1 -#define ID_AA64ZFR0_SHA3 0x1 -#define ID_AA64ZFR0_BITPERM 0x1 -#define ID_AA64ZFR0_AES 0x1 -#define ID_AA64ZFR0_AES_PMULL 0x2 -#define ID_AA64ZFR0_SVEVER_SVE2 0x1 +#define ID_AA64PFR0_EL1_ELx_64BIT_ONLY 0x1 +#define ID_AA64PFR0_EL1_ELx_32BIT_64BIT 0x2 /* id_aa64mmfr0 */ -#define ID_AA64MMFR0_ECV_SHIFT 60 -#define ID_AA64MMFR0_FGT_SHIFT 56 -#define ID_AA64MMFR0_EXS_SHIFT 44 -#define ID_AA64MMFR0_TGRAN4_2_SHIFT 40 -#define ID_AA64MMFR0_TGRAN64_2_SHIFT 36 -#define ID_AA64MMFR0_TGRAN16_2_SHIFT 32 -#define ID_AA64MMFR0_TGRAN4_SHIFT 28 -#define ID_AA64MMFR0_TGRAN64_SHIFT 24 -#define ID_AA64MMFR0_TGRAN16_SHIFT 20 -#define ID_AA64MMFR0_BIGENDEL0_SHIFT 16 -#define ID_AA64MMFR0_SNSMEM_SHIFT 12 -#define ID_AA64MMFR0_BIGENDEL_SHIFT 8 -#define ID_AA64MMFR0_ASID_SHIFT 4 -#define ID_AA64MMFR0_PARANGE_SHIFT 0 - -#define ID_AA64MMFR0_ASID_8 0x0 -#define ID_AA64MMFR0_ASID_16 0x2 - -#define ID_AA64MMFR0_TGRAN4_NI 0xf -#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN 0x0 -#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX 0x7 -#define ID_AA64MMFR0_TGRAN64_NI 0xf -#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN 0x0 -#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX 0x7 -#define ID_AA64MMFR0_TGRAN16_NI 0x0 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN 0x1 -#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX 0xf - -#define ID_AA64MMFR0_PARANGE_32 0x0 -#define ID_AA64MMFR0_PARANGE_36 0x1 -#define 
ID_AA64MMFR0_PARANGE_40 0x2 -#define ID_AA64MMFR0_PARANGE_42 0x3 -#define ID_AA64MMFR0_PARANGE_44 0x4 -#define ID_AA64MMFR0_PARANGE_48 0x5 -#define ID_AA64MMFR0_PARANGE_52 0x6 +#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN 0x0 +#define ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN 0x1 +#define ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX 0xf #define ARM64_MIN_PARANGE_BITS 32 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_DEFAULT 0x0 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_NONE 0x1 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MIN 0x2 -#define ID_AA64MMFR0_TGRAN_2_SUPPORTED_MAX 0x7 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT 0x0 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE 0x1 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN 0x2 +#define ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX 0x7 #ifdef CONFIG_ARM64_PA_BITS_52 -#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_52 +#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_52 #else -#define ID_AA64MMFR0_PARANGE_MAX ID_AA64MMFR0_PARANGE_48 +#define ID_AA64MMFR0_EL1_PARANGE_MAX ID_AA64MMFR0_EL1_PARANGE_48 #endif -/* id_aa64mmfr1 */ -#define ID_AA64MMFR1_ETS_SHIFT 36 -#define ID_AA64MMFR1_TWED_SHIFT 32 -#define ID_AA64MMFR1_XNX_SHIFT 28 -#define ID_AA64MMFR1_SPECSEI_SHIFT 24 -#define ID_AA64MMFR1_PAN_SHIFT 20 -#define ID_AA64MMFR1_LOR_SHIFT 16 -#define ID_AA64MMFR1_HPD_SHIFT 12 -#define ID_AA64MMFR1_VHE_SHIFT 8 -#define ID_AA64MMFR1_VMIDBITS_SHIFT 4 -#define ID_AA64MMFR1_HADBS_SHIFT 0 - -#define ID_AA64MMFR1_VMIDBITS_8 0 -#define ID_AA64MMFR1_VMIDBITS_16 2 - -/* id_aa64mmfr2 */ -#define ID_AA64MMFR2_E0PD_SHIFT 60 -#define ID_AA64MMFR2_EVT_SHIFT 56 -#define ID_AA64MMFR2_BBM_SHIFT 52 -#define ID_AA64MMFR2_TTL_SHIFT 48 -#define ID_AA64MMFR2_FWB_SHIFT 40 -#define ID_AA64MMFR2_IDS_SHIFT 36 -#define ID_AA64MMFR2_AT_SHIFT 32 -#define ID_AA64MMFR2_ST_SHIFT 28 -#define ID_AA64MMFR2_NV_SHIFT 24 -#define ID_AA64MMFR2_CCIDX_SHIFT 20 -#define ID_AA64MMFR2_LVA_SHIFT 16 -#define ID_AA64MMFR2_IESB_SHIFT 12 -#define ID_AA64MMFR2_LSM_SHIFT 8 -#define ID_AA64MMFR2_UAO_SHIFT 4 -#define ID_AA64MMFR2_CNP_SHIFT 0 - -/* id_aa64dfr0 */ -#define ID_AA64DFR0_MTPMU_SHIFT 48 -#define ID_AA64DFR0_TRBE_SHIFT 44 -#define ID_AA64DFR0_TRACE_FILT_SHIFT 40 -#define ID_AA64DFR0_DOUBLELOCK_SHIFT 36 -#define ID_AA64DFR0_PMSVER_SHIFT 32 -#define ID_AA64DFR0_CTX_CMPS_SHIFT 28 -#define ID_AA64DFR0_WRPS_SHIFT 20 -#define ID_AA64DFR0_BRPS_SHIFT 12 -#define ID_AA64DFR0_PMUVER_SHIFT 8 -#define ID_AA64DFR0_TRACEVER_SHIFT 4 -#define ID_AA64DFR0_DEBUGVER_SHIFT 0 - -#define ID_AA64DFR0_PMUVER_8_0 0x1 -#define ID_AA64DFR0_PMUVER_8_1 0x4 -#define ID_AA64DFR0_PMUVER_8_4 0x5 -#define ID_AA64DFR0_PMUVER_8_5 0x6 -#define ID_AA64DFR0_PMUVER_IMP_DEF 0xf - -#define ID_AA64DFR0_PMSVER_8_2 0x1 -#define ID_AA64DFR0_PMSVER_8_3 0x2 - -#define ID_DFR0_PERFMON_SHIFT 24 - -#define ID_DFR0_PERFMON_8_0 0x3 -#define ID_DFR0_PERFMON_8_1 0x4 -#define ID_DFR0_PERFMON_8_4 0x5 -#define ID_DFR0_PERFMON_8_5 0x6 - -#define ID_ISAR4_SWP_FRAC_SHIFT 28 -#define ID_ISAR4_PSR_M_SHIFT 24 -#define ID_ISAR4_SYNCH_PRIM_FRAC_SHIFT 20 -#define ID_ISAR4_BARRIER_SHIFT 16 -#define ID_ISAR4_SMC_SHIFT 12 -#define ID_ISAR4_WRITEBACK_SHIFT 8 -#define ID_ISAR4_WITHSHIFTS_SHIFT 4 -#define ID_ISAR4_UNPRIV_SHIFT 0 - -#define ID_DFR1_MTPMU_SHIFT 0 - -#define ID_ISAR0_DIVIDE_SHIFT 24 -#define ID_ISAR0_DEBUG_SHIFT 20 -#define ID_ISAR0_COPROC_SHIFT 16 -#define ID_ISAR0_CMPBRANCH_SHIFT 12 -#define 
ID_ISAR0_BITFIELD_SHIFT 8 -#define ID_ISAR0_BITCOUNT_SHIFT 4 -#define ID_ISAR0_SWAP_SHIFT 0 - -#define ID_ISAR5_RDM_SHIFT 24 -#define ID_ISAR5_CRC32_SHIFT 16 -#define ID_ISAR5_SHA2_SHIFT 12 -#define ID_ISAR5_SHA1_SHIFT 8 -#define ID_ISAR5_AES_SHIFT 4 -#define ID_ISAR5_SEVL_SHIFT 0 - -#define ID_ISAR6_I8MM_SHIFT 24 -#define ID_ISAR6_BF16_SHIFT 20 -#define ID_ISAR6_SPECRES_SHIFT 16 -#define ID_ISAR6_SB_SHIFT 12 -#define ID_ISAR6_FHM_SHIFT 8 -#define ID_ISAR6_DP_SHIFT 4 -#define ID_ISAR6_JSCVT_SHIFT 0 - -#define ID_MMFR0_INNERSHR_SHIFT 28 -#define ID_MMFR0_FCSE_SHIFT 24 -#define ID_MMFR0_AUXREG_SHIFT 20 -#define ID_MMFR0_TCM_SHIFT 16 -#define ID_MMFR0_SHARELVL_SHIFT 12 -#define ID_MMFR0_OUTERSHR_SHIFT 8 -#define ID_MMFR0_PMSA_SHIFT 4 -#define ID_MMFR0_VMSA_SHIFT 0 - -#define ID_MMFR4_EVT_SHIFT 28 -#define ID_MMFR4_CCIDX_SHIFT 24 -#define ID_MMFR4_LSM_SHIFT 20 -#define ID_MMFR4_HPDS_SHIFT 16 -#define ID_MMFR4_CNP_SHIFT 12 -#define ID_MMFR4_XNX_SHIFT 8 -#define ID_MMFR4_AC2_SHIFT 4 -#define ID_MMFR4_SPECSEI_SHIFT 0 - -#define ID_MMFR5_ETS_SHIFT 0 - -#define ID_PFR0_DIT_SHIFT 24 -#define ID_PFR0_CSV2_SHIFT 16 -#define ID_PFR0_STATE3_SHIFT 12 -#define ID_PFR0_STATE2_SHIFT 8 -#define ID_PFR0_STATE1_SHIFT 4 -#define ID_PFR0_STATE0_SHIFT 0 - -#define ID_DFR0_PERFMON_SHIFT 24 -#define ID_DFR0_MPROFDBG_SHIFT 20 -#define ID_DFR0_MMAPTRC_SHIFT 16 -#define ID_DFR0_COPTRC_SHIFT 12 -#define ID_DFR0_MMAPDBG_SHIFT 8 -#define ID_DFR0_COPSDBG_SHIFT 4 -#define ID_DFR0_COPDBG_SHIFT 0 - -#define ID_PFR2_SSBS_SHIFT 4 -#define ID_PFR2_CSV3_SHIFT 0 - -#define MVFR0_FPROUND_SHIFT 28 -#define MVFR0_FPSHVEC_SHIFT 24 -#define MVFR0_FPSQRT_SHIFT 20 -#define MVFR0_FPDIVIDE_SHIFT 16 -#define MVFR0_FPTRAP_SHIFT 12 -#define MVFR0_FPDP_SHIFT 8 -#define MVFR0_FPSP_SHIFT 4 -#define MVFR0_SIMD_SHIFT 0 - -#define MVFR1_SIMDFMAC_SHIFT 28 -#define MVFR1_FPHP_SHIFT 24 -#define MVFR1_SIMDHP_SHIFT 20 -#define MVFR1_SIMDSP_SHIFT 16 -#define MVFR1_SIMDINT_SHIFT 12 -#define MVFR1_SIMDLS_SHIFT 8 -#define MVFR1_FPDNAN_SHIFT 4 -#define MVFR1_FPFTZ_SHIFT 0 - -#define ID_PFR1_GIC_SHIFT 28 -#define ID_PFR1_VIRT_FRAC_SHIFT 24 -#define ID_PFR1_SEC_FRAC_SHIFT 20 -#define ID_PFR1_GENTIMER_SHIFT 16 -#define ID_PFR1_VIRTUALIZATION_SHIFT 12 -#define ID_PFR1_MPROGMOD_SHIFT 8 -#define ID_PFR1_SECURITY_SHIFT 4 -#define ID_PFR1_PROGMOD_SHIFT 0 - #if defined(CONFIG_ARM64_4K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN4_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN4_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN4_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT #elif defined(CONFIG_ARM64_16K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN16_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN16_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN16_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT 
ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT #elif defined(CONFIG_ARM64_64K_PAGES) -#define ID_AA64MMFR0_TGRAN_SHIFT ID_AA64MMFR0_TGRAN64_SHIFT -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN -#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX -#define ID_AA64MMFR0_TGRAN_2_SHIFT ID_AA64MMFR0_TGRAN64_2_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SHIFT ID_AA64MMFR0_EL1_TGRAN64_SHIFT +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MIN ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MIN +#define ID_AA64MMFR0_EL1_TGRAN_SUPPORTED_MAX ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED_MAX +#define ID_AA64MMFR0_EL1_TGRAN_2_SHIFT ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT #endif -#define MVFR2_FPMISC_SHIFT 4 -#define MVFR2_SIMDMISC_SHIFT 0 - -#define DCZID_DZP_SHIFT 4 -#define DCZID_BS_SHIFT 0 +#define CPACR_EL1_FPEN_EL1EN (BIT(20)) /* enable EL1 access */ +#define CPACR_EL1_FPEN_EL0EN (BIT(21)) /* enable EL0 access, if EL1EN set */ -/* - * The ZCR_ELx_LEN_* definitions intentionally include bits [8:4] which - * are reserved by the SVE architecture for future expansion of the LEN - * field, with compatible semantics. - */ -#define ZCR_ELx_LEN_SHIFT 0 -#define ZCR_ELx_LEN_SIZE 9 -#define ZCR_ELx_LEN_MASK 0x1ff +#define CPACR_EL1_SMEN_EL1EN (BIT(24)) /* enable EL1 access */ +#define CPACR_EL1_SMEN_EL0EN (BIT(25)) /* enable EL0 access, if EL1EN set */ #define CPACR_EL1_ZEN_EL1EN (BIT(16)) /* enable EL1 access */ #define CPACR_EL1_ZEN_EL0EN (BIT(17)) /* enable EL0 access, if EL1EN set */ -#define CPACR_EL1_ZEN (CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN) - -/* TCR EL1 Bit Definitions */ -#define SYS_TCR_EL1_TCMA1 (BIT(58)) -#define SYS_TCR_EL1_TCMA0 (BIT(57)) /* GCR_EL1 Definitions */ #define SYS_GCR_EL1_RRND (BIT(16)) #define SYS_GCR_EL1_EXCL_MASK 0xffffUL +#define KERNEL_GCR_EL1 (SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL) + /* RGSR_EL1 Definitions */ #define SYS_RGSR_EL1_TAG_MASK 0xfUL #define SYS_RGSR_EL1_SEED_SHIFT 8 #define SYS_RGSR_EL1_SEED_MASK 0xffffUL -/* GMID_EL1 field definitions */ -#define SYS_GMID_EL1_BS_SHIFT 0 -#define SYS_GMID_EL1_BS_SIZE 4 - /* TFSR{,E0}_EL1 bit definitions */ #define SYS_TFSR_EL1_TF0_SHIFT 0 #define SYS_TFSR_EL1_TF1_SHIFT 1 @@ -1103,6 +627,7 @@ #define SYS_MPIDR_SAFE_VAL (BIT(31)) #define TRFCR_ELx_TS_SHIFT 5 +#define TRFCR_ELx_TS_MASK ((0x3UL) << TRFCR_ELx_TS_SHIFT) #define TRFCR_ELx_TS_VIRTUAL ((0x1UL) << TRFCR_ELx_TS_SHIFT) #define TRFCR_ELx_TS_GUEST_PHYSICAL ((0x2UL) << TRFCR_ELx_TS_SHIFT) #define TRFCR_ELx_TS_PHYSICAL ((0x3UL) << TRFCR_ELx_TS_SHIFT) @@ -1110,7 +635,6 @@ #define TRFCR_ELx_ExTRE BIT(1) #define TRFCR_ELx_E0TRE BIT(0) - /* GIC Hypervisor interface registers */ /* ICH_MISR_EL2 bit definitions */ #define ICH_MISR_EOI (1 << 0) @@ -1137,6 +661,7 @@ #define ICH_HCR_TC (1 << 10) #define ICH_HCR_TALL0 (1 << 11) #define ICH_HCR_TALL1 (1 << 12) +#define ICH_HCR_TDIR (1 << 14) #define ICH_HCR_EOIcount_SHIFT 27 #define ICH_HCR_EOIcount_MASK (0x1f << ICH_HCR_EOIcount_SHIFT) @@ -1169,49 +694,60 @@ #define ICH_VTR_SEIS_MASK (1 << ICH_VTR_SEIS_SHIFT) #define ICH_VTR_A3V_SHIFT 21 #define ICH_VTR_A3V_MASK (1 << ICH_VTR_A3V_SHIFT) +#define ICH_VTR_TDS_SHIFT 19 +#define ICH_VTR_TDS_MASK (1 << ICH_VTR_TDS_SHIFT) + +/* + * Permission Indirection Extension (PIE) permission encodings. + * Encodings with the _O suffix, have overlays applied (Permission Overlay Extension). 
+ */ +#define PIE_NONE_O 0x0 +#define PIE_R_O 0x1 +#define PIE_X_O 0x2 +#define PIE_RX_O 0x3 +#define PIE_RW_O 0x5 +#define PIE_RWnX_O 0x6 +#define PIE_RWX_O 0x7 +#define PIE_R 0x8 +#define PIE_GCS 0x9 +#define PIE_RX 0xa +#define PIE_RW 0xc +#define PIE_RWX 0xe + +#define PIRx_ELx_PERM(idx, perm) ((perm) << ((idx) * 4)) #define ARM64_FEATURE_FIELD_BITS 4 -/* Create a mask for the feature bits of the specified feature. */ -#define ARM64_FEATURE_MASK(x) (GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT)) +/* Defined for compatibility only, do not add new users. */ +#define ARM64_FEATURE_MASK(x) (x##_MASK) #ifdef __ASSEMBLY__ - .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 - .equ .L__reg_num_x\num, \num - .endr - .equ .L__reg_num_xzr, 31 - .macro mrs_s, rt, sreg - __emit_inst(0xd5200000|(\sreg)|(.L__reg_num_\rt)) + __emit_inst(0xd5200000|(\sreg)|(.L__gpr_num_\rt)) .endm .macro msr_s, sreg, rt - __emit_inst(0xd5000000|(\sreg)|(.L__reg_num_\rt)) + __emit_inst(0xd5000000|(\sreg)|(.L__gpr_num_\rt)) .endm #else +#include #include #include #include -#define __DEFINE_MRS_MSR_S_REGNUM \ -" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" \ -" .equ .L__reg_num_x\\num, \\num\n" \ -" .endr\n" \ -" .equ .L__reg_num_xzr, 31\n" - #define DEFINE_MRS_S \ - __DEFINE_MRS_MSR_S_REGNUM \ + __DEFINE_ASM_GPR_NUMS \ " .macro mrs_s, rt, sreg\n" \ - __emit_inst(0xd5200000|(\\sreg)|(.L__reg_num_\\rt)) \ + __emit_inst(0xd5200000|(\\sreg)|(.L__gpr_num_\\rt)) \ " .endm\n" #define DEFINE_MSR_S \ - __DEFINE_MRS_MSR_S_REGNUM \ + __DEFINE_ASM_GPR_NUMS \ " .macro msr_s, sreg, rt\n" \ - __emit_inst(0xd5000000|(\\sreg)|(.L__reg_num_\\rt)) \ + __emit_inst(0xd5000000|(\\sreg)|(.L__gpr_num_\\rt)) \ " .endm\n" #define UNDEFINE_MRS_S \ @@ -1291,6 +827,15 @@ par; \ }) +#define SYS_FIELD_GET(reg, field, val) \ + FIELD_GET(reg##_##field##_MASK, val) + +#define SYS_FIELD_PREP(reg, field, val) \ + FIELD_PREP(reg##_##field##_MASK, val) + +#define SYS_FIELD_PREP_ENUM(reg, field, val) \ + FIELD_PREP(reg##_##field##_MASK, reg##_##field##_##val) + #endif #endif /* __ASM_SYSREG_H */ diff --git a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c index b90580840b22..8e5bd07a3727 100644 --- a/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c +++ b/tools/testing/selftests/kvm/aarch64/aarch32_id_regs.c @@ -146,8 +146,8 @@ static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu) vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val); - el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0), val); - return el0 == ID_AA64PFR0_ELx_64BIT_ONLY; + el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); + return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY; } int main(void) diff --git a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c index f5b6cb3a0019..866002917441 100644 --- a/tools/testing/selftests/kvm/aarch64/debug-exceptions.c +++ b/tools/testing/selftests/kvm/aarch64/debug-exceptions.c @@ -116,12 +116,12 @@ static void reset_debug_state(void) /* Reset all bcr/bvr/wcr/wvr registers */ dfr0 = read_sysreg(id_aa64dfr0_el1); - brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), dfr0); + brps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), dfr0); for (i = 0; i <= brps; i++) { write_dbgbcr(i, 0); write_dbgbvr(i, 0); } - wrps = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), dfr0); + wrps = 
FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), dfr0); for (i = 0; i <= wrps; i++) { write_dbgwcr(i, 0); write_dbgwvr(i, 0); @@ -418,7 +418,7 @@ static void guest_code_ss(int test_cnt) static int debug_version(uint64_t id_aa64dfr0) { - return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), id_aa64dfr0); + return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), id_aa64dfr0); } static void test_guest_debug_exceptions(uint8_t bpn, uint8_t wpn, uint8_t ctx_bpn) @@ -539,14 +539,14 @@ void test_guest_debug_exceptions_all(uint64_t aa64dfr0) int b, w, c; /* Number of breakpoints */ - brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_BRPS), aa64dfr0) + 1; + brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_BRPs), aa64dfr0) + 1; __TEST_REQUIRE(brp_num >= 2, "At least two breakpoints are required"); /* Number of watchpoints */ - wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_WRPS), aa64dfr0) + 1; + wrp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_WRPs), aa64dfr0) + 1; /* Number of context aware breakpoints */ - ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_CTX_CMPS), aa64dfr0) + 1; + ctx_brp_num = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_CTX_CMPs), aa64dfr0) + 1; pr_debug("%s brp_num:%d, wrp_num:%d, ctx_brp_num:%d\n", __func__, brp_num, wrp_num, ctx_brp_num); diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index 47bb914ab2fa..975d28be3cca 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -96,14 +96,14 @@ static bool guest_check_lse(void) uint64_t isar0 = read_sysreg(id_aa64isar0_el1); uint64_t atomic; - atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_ATOMICS), isar0); + atomic = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64ISAR0_EL1_ATOMIC), isar0); return atomic >= 2; } static bool guest_check_dc_zva(void) { uint64_t dczid = read_sysreg(dczid_el0); - uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_DZP), dczid); + uint64_t dzp = FIELD_GET(ARM64_FEATURE_MASK(DCZID_EL0_DZP), dczid); return dzp == 0; } @@ -196,7 +196,7 @@ static bool guest_set_ha(void) uint64_t hadbs, tcr; /* Skip if HA is not supported. 
*/ - hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_HADBS), mmfr1); + hadbs = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_HAFDBS), mmfr1); if (hadbs == 0) return false; diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c index 3a0259e25335..6fe12e985ba5 100644 --- a/tools/testing/selftests/kvm/lib/aarch64/processor.c +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -518,9 +518,9 @@ void aarch64_get_supported_page_sizes(uint32_t ipa, err = ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg); TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd)); - *ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN4), val) != 0xf; - *ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN64), val) == 0; - *ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN16), val) != 0; + *ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN4), val) != 0xf; + *ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN64), val) == 0; + *ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_TGRAN16), val) != 0; close(vcpu_fd); close(vm_fd); -- cgit v1.2.3 From 54a9ea73527d55ab746d5425e10f3fa748e00e70 Mon Sep 17 00:00:00 2001 From: Jing Zhang Date: Wed, 11 Oct 2023 19:57:40 +0000 Subject: KVM: arm64: selftests: Test for setting ID register from userspace Add tests to verify setting ID registers from userspace is handled correctly by KVM. Also add a test case to use ioctl KVM_ARM_GET_REG_WRITABLE_MASKS to get writable masks. Signed-off-by: Jing Zhang Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231011195740.3349631-6-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- tools/testing/selftests/kvm/Makefile | 1 + tools/testing/selftests/kvm/aarch64/set_id_regs.c | 481 ++++++++++++++++++++++ 2 files changed, 482 insertions(+) create mode 100644 tools/testing/selftests/kvm/aarch64/set_id_regs.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 07b3f4dc1a77..4f4f6ad025f4 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -156,6 +156,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/debug-exceptions TEST_GEN_PROGS_aarch64 += aarch64/hypercalls TEST_GEN_PROGS_aarch64 += aarch64/page_fault_test TEST_GEN_PROGS_aarch64 += aarch64/psci_test +TEST_GEN_PROGS_aarch64 += aarch64/set_id_regs TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config TEST_GEN_PROGS_aarch64 += aarch64/vgic_init diff --git a/tools/testing/selftests/kvm/aarch64/set_id_regs.c b/tools/testing/selftests/kvm/aarch64/set_id_regs.c new file mode 100644 index 000000000000..bac05210b539 --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/set_id_regs.c @@ -0,0 +1,481 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * set_id_regs - Test for setting ID register from userspace. + * + * Copyright (c) 2023 Google LLC. + * + * + * Test that KVM supports setting ID registers from userspace and handles the + * feature set correctly.
+ */ + +#include +#include "kvm_util.h" +#include "processor.h" +#include "test_util.h" +#include + +enum ftr_type { + FTR_EXACT, /* Use a predefined safe value */ + FTR_LOWER_SAFE, /* Smaller value is safe */ + FTR_HIGHER_SAFE, /* Bigger value is safe */ + FTR_HIGHER_OR_ZERO_SAFE, /* Bigger value is safe, but 0 is biggest */ + FTR_END, /* Mark the last ftr bits */ +}; + +#define FTR_SIGNED true /* Value should be treated as signed */ +#define FTR_UNSIGNED false /* Value should be treated as unsigned */ + +struct reg_ftr_bits { + char *name; + bool sign; + enum ftr_type type; + uint8_t shift; + uint64_t mask; + int64_t safe_val; +}; + +struct test_feature_reg { + uint32_t reg; + const struct reg_ftr_bits *ftr_bits; +}; + +#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL) \ + { \ + .name = #NAME, \ + .sign = SIGNED, \ + .type = TYPE, \ + .shift = SHIFT, \ + .mask = MASK, \ + .safe_val = SAFE_VAL, \ + } + +#define REG_FTR_BITS(type, reg, field, safe_val) \ + __REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \ + reg##_##field##_MASK, safe_val) + +#define S_REG_FTR_BITS(type, reg, field, safe_val) \ + __REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \ + reg##_##field##_MASK, safe_val) + +#define REG_FTR_END \ + { \ + .type = FTR_END, \ + } + +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = { + S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_dfr0_el1[] = { + S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = { + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = { + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = { + 
REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = { + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = { + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0), + S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0), + S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ASIDBITS, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = { + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0), + REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = { + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0), + REG_FTR_END, +}; + +static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = { + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0), + REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0), + REG_FTR_END, +}; + +#define TEST_REG(id, table) \ + { \ + .reg = id, \ + .ftr_bits = &((table)[0]), \ + } + +static struct test_feature_reg test_regs[] = { + TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1), + TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1), + TEST_REG(SYS_ID_AA64ISAR0_EL1, 
ftr_id_aa64isar0_el1),
+	TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
+	TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
+	TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
+	TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
+	TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
+	TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
+	TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
+};
+
+#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);
+
+static void guest_code(void)
+{
+	GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
+	GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
+
+	GUEST_DONE();
+}
+
+/* Return a safe value for a given ftr_bits and ftr value */
+uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
+{
+	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
+
+	if (ftr_bits->sign == FTR_UNSIGNED) {
+		switch (ftr_bits->type) {
+		case FTR_EXACT:
+			ftr = ftr_bits->safe_val;
+			break;
+		case FTR_LOWER_SAFE:
+			if (ftr > 0)
+				ftr--;
+			break;
+		case FTR_HIGHER_SAFE:
+			if (ftr < ftr_max)
+				ftr++;
+			break;
+		case FTR_HIGHER_OR_ZERO_SAFE:
+			if (ftr == ftr_max)
+				ftr = 0;
+			else if (ftr != 0)
+				ftr++;
+			break;
+		default:
+			break;
+		}
+	} else if (ftr != ftr_max) {
+		switch (ftr_bits->type) {
+		case FTR_EXACT:
+			ftr = ftr_bits->safe_val;
+			break;
+		case FTR_LOWER_SAFE:
+			if (ftr > 0)
+				ftr--;
+			break;
+		case FTR_HIGHER_SAFE:
+			if (ftr < ftr_max - 1)
+				ftr++;
+			break;
+		case FTR_HIGHER_OR_ZERO_SAFE:
+			if (ftr != 0 && ftr != ftr_max - 1)
+				ftr++;
+			break;
+		default:
+			break;
+		}
+	}
+
+	return ftr;
+}
+
+/* Return an invalid value for a given ftr_bits and ftr value */
+uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
+{
+	uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
+
+	if (ftr_bits->sign == FTR_UNSIGNED) {
+		switch (ftr_bits->type) {
+		case FTR_EXACT:
+			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
+			break;
+		case FTR_LOWER_SAFE:
+			ftr++;
+			break;
+		case FTR_HIGHER_SAFE:
+			ftr--;
+			break;
+		case FTR_HIGHER_OR_ZERO_SAFE:
+			if (ftr == 0)
+				ftr = ftr_max;
+			else
+				ftr--;
+			break;
+		default:
+			break;
+		}
+	} else if (ftr != ftr_max) {
+		switch (ftr_bits->type) {
+		case FTR_EXACT:
+			ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
+			break;
+		case FTR_LOWER_SAFE:
+			ftr++;
+			break;
+		case FTR_HIGHER_SAFE:
+			ftr--;
+			break;
+		case FTR_HIGHER_OR_ZERO_SAFE:
+			if (ftr == 0)
+				ftr = ftr_max - 1;
+			else
+				ftr--;
+			break;
+		default:
+			break;
+		}
+	} else {
+		ftr = 0;
+	}
+
+	return ftr;
+}
+
+static void test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
+				 const struct reg_ftr_bits *ftr_bits)
+{
+	uint8_t shift = ftr_bits->shift;
+	uint64_t mask = ftr_bits->mask;
+	uint64_t val, new_val, ftr;
+
+	vcpu_get_reg(vcpu, reg, &val);
+	ftr = (val & mask) >> shift;
+
+	ftr = get_safe_value(ftr_bits, ftr);
+
+	ftr <<= shift;
+	val &= ~mask;
+	val |= ftr;
+
+	vcpu_set_reg(vcpu, reg, val);
+	vcpu_get_reg(vcpu, reg, &new_val);
+	TEST_ASSERT_EQ(new_val, val);
+}
+
+static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
+			      const struct reg_ftr_bits *ftr_bits)
+{
+	uint8_t shift = ftr_bits->shift;
+	uint64_t mask = ftr_bits->mask;
+	uint64_t val, old_val, ftr;
+	int r;
+
+
vcpu_get_reg(vcpu, reg, &val); + ftr = (val & mask) >> shift; + + ftr = get_invalid_value(ftr_bits, ftr); + + old_val = val; + ftr <<= shift; + val &= ~mask; + val |= ftr; + + r = __vcpu_set_reg(vcpu, reg, val); + TEST_ASSERT(r < 0 && errno == EINVAL, + "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno); + + vcpu_get_reg(vcpu, reg, &val); + TEST_ASSERT_EQ(val, old_val); +} + +static void test_user_set_reg(struct kvm_vcpu *vcpu, bool aarch64_only) +{ + uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE]; + struct reg_mask_range range = { + .addr = (__u64)masks, + }; + int ret; + + /* KVM should return error when reserved field is not zero */ + range.reserved[0] = 1; + ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range); + TEST_ASSERT(ret, "KVM doesn't check invalid parameters."); + + /* Get writable masks for feature ID registers */ + memset(range.reserved, 0, sizeof(range.reserved)); + vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range); + + for (int i = 0; i < ARRAY_SIZE(test_regs); i++) { + const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits; + uint32_t reg_id = test_regs[i].reg; + uint64_t reg = KVM_ARM64_SYS_REG(reg_id); + int idx; + + /* Get the index to masks array for the idreg */ + idx = KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(reg_id), sys_reg_Op1(reg_id), + sys_reg_CRn(reg_id), sys_reg_CRm(reg_id), + sys_reg_Op2(reg_id)); + + for (int j = 0; ftr_bits[j].type != FTR_END; j++) { + /* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */ + if (aarch64_only && sys_reg_CRm(reg_id) < 4) { + ksft_test_result_skip("%s on AARCH64 only system\n", + ftr_bits[j].name); + continue; + } + + /* Make sure the feature field is writable */ + TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask); + + test_reg_set_fail(vcpu, reg, &ftr_bits[j]); + test_reg_set_success(vcpu, reg, &ftr_bits[j]); + + ksft_test_result_pass("%s\n", ftr_bits[j].name); + } + } +} + +static void test_guest_reg_read(struct kvm_vcpu *vcpu) +{ + bool done = false; + struct ucall uc; + uint64_t val; + + while (!done) { + vcpu_run(vcpu); + + switch (get_ucall(vcpu, &uc)) { + case UCALL_ABORT: + REPORT_GUEST_ASSERT(uc); + break; + case UCALL_SYNC: + /* Make sure the written values are seen by guest */ + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(uc.args[2]), &val); + TEST_ASSERT_EQ(val, uc.args[3]); + break; + case UCALL_DONE: + done = true; + break; + default: + TEST_FAIL("Unexpected ucall: %lu", uc.cmd); + } + } +} + +int main(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + bool aarch64_only; + uint64_t val, el0; + int ftr_cnt; + + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES)); + + vm = vm_create_with_one_vcpu(&vcpu, guest_code); + + /* Check for AARCH64 only system */ + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), &val); + el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val); + aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY); + + ksft_print_header(); + + ftr_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) + + ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) + + ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) + + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) + ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) + ARRAY_SIZE(ftr_id_aa64zfr0_el1) - + ARRAY_SIZE(test_regs); + + ksft_set_plan(ftr_cnt); + + test_user_set_reg(vcpu, aarch64_only); + test_guest_reg_read(vcpu); + + kvm_vm_free(vm); + + ksft_finished(); +} -- cgit v1.2.3 From 
38ce26bf26666779565c2770e7ef36c02e0212d7 Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Wed, 18 Oct 2023 23:32:08 +0000
Subject: KVM: arm64: Don't zero VTTBR in __tlb_switch_to_host()

HCR_EL2.TGE=1 is sufficient to disable stage-2 translation, so there's
no need to explicitly zero VTTBR_EL2.

Link: https://lore.kernel.org/r/20231018233212.2888027-2-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/hyp/vhe/tlb.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 46bd43f61d76..f3f2e142e4f4 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -66,7 +66,6 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 	 * We're done with the TLB operation, let's restore the host's
 	 * view of HCR_EL2.
 	 */
-	write_sysreg(0, vttbr_el2);
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 	isb();
-- cgit v1.2.3

From 4288ff7ba195f49138eec0d9c4ff8c049714e918 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 18 Oct 2023 23:32:09 +0000
Subject: KVM: arm64: Restore the stage-2 context in VHE's __tlb_switch_to_host()

An MMU notifier could cause us to clobber the stage-2 context loaded on
a CPU when we switch to another VM's context to invalidate. This isn't
an issue right now as the stage-2 context gets reloaded on every guest
entry, but is disastrous when moving __load_stage2() into the
vcpu_load() path.

Restore the previous stage-2 context on the way out of a TLB
invalidation if we installed something else. Deliberately do this after
TGE=1 is synchronized to keep things safe in light of the speculative AT
errata.

Signed-off-by: Marc Zyngier
Link: https://lore.kernel.org/r/20231018233212.2888027-3-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/hyp/vhe/tlb.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index f3f2e142e4f4..b636b4111dbf 100644
--- a/arch/arm64/kvm/hyp/vhe/tlb.c
+++ b/arch/arm64/kvm/hyp/vhe/tlb.c
@@ -11,18 +11,25 @@
 #include <asm/tlbflush.h>
 
 struct tlb_inv_context {
-	unsigned long flags;
-	u64 tcr;
-	u64 sctlr;
+	struct kvm_s2_mmu	*mmu;
+	unsigned long		flags;
+	u64			tcr;
+	u64			sctlr;
 };
 
 static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
 				  struct tlb_inv_context *cxt)
 {
+	struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
 	u64 val;
 
 	local_irq_save(cxt->flags);
 
+	if (vcpu && mmu != vcpu->arch.hw_mmu)
+		cxt->mmu = vcpu->arch.hw_mmu;
+	else
+		cxt->mmu = NULL;
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/*
 		 * For CPUs that are affected by ARM errata 1165522 or 1530923,
@@ -69,6 +76,10 @@ static void __tlb_switch_to_host(struct tlb_inv_context *cxt)
 	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
 	isb();
 
+	/* ... and the stage-2 MMU context that we switched away from */
+	if (cxt->mmu)
+		__load_stage2(cxt->mmu, cxt->mmu->arch);
+
 	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
 		/* Restore the registers to what they were */
 		write_sysreg_el1(cxt->tcr, SYS_TCR);
-- cgit v1.2.3

From 5eba523e1e5e83d2c1a282c3c488d98b990333c2 Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Wed, 18 Oct 2023 23:32:10 +0000
Subject: KVM: arm64: Reload stage-2 for VMID change on VHE

Naturally, a change to the VMID for an MMU implies a new value for
VTTBR. Reload on VMID change in anticipation of loading stage-2 on
vcpu_load() instead of every guest entry.
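
For reference, the reason a VMID change implies a new VTTBR value is
that VTTBR_EL2 packs the stage-2 PGD base and the VMID into a single
register. Roughly, following kvm_get_vttbr() in asm/kvm_mmu.h (a sketch
for illustration, not part of this patch):

	baddr       = mmu->pgd_phys;
	vmid_field  = atomic64_read(&mmu->vmid.id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_get_vmid_bits());
	vttbr       = baddr | vmid_field | cnp;	/* cnp = VTTBR_CNP_BIT if supported */

A stale VTTBR left loaded across a VMID rollover would therefore tag TLB
entries with the wrong VMID, hence the conditional reload in the diff
below.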
Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231018233212.2888027-4-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 2 +- arch/arm64/kvm/arm.c | 5 ++++- arch/arm64/kvm/vmid.c | 11 ++++++++--- 3 files changed, 13 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index af06ccb7ee34..be0ab101c557 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1025,7 +1025,7 @@ int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu, extern unsigned int __ro_after_init kvm_arm_vmid_bits; int __init kvm_arm_vmid_alloc_init(void); void __init kvm_arm_vmid_alloc_free(void); -void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); +bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid); void kvm_arm_vmid_clear_active(void); static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 4866b3f7b4ea..3ab904adbd64 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -950,7 +950,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * making a thread's VMID inactive. So we need to call * kvm_arm_vmid_update() in non-premptible context. */ - kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid); + if (kvm_arm_vmid_update(&vcpu->arch.hw_mmu->vmid) && + has_vhe()) + __load_stage2(vcpu->arch.hw_mmu, + vcpu->arch.hw_mmu->arch); kvm_pmu_flush_hwstate(vcpu); diff --git a/arch/arm64/kvm/vmid.c b/arch/arm64/kvm/vmid.c index 7fe8ba1a2851..806223b7022a 100644 --- a/arch/arm64/kvm/vmid.c +++ b/arch/arm64/kvm/vmid.c @@ -135,10 +135,11 @@ void kvm_arm_vmid_clear_active(void) atomic64_set(this_cpu_ptr(&active_vmids), VMID_ACTIVE_INVALID); } -void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) +bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) { unsigned long flags; u64 vmid, old_active_vmid; + bool updated = false; vmid = atomic64_read(&kvm_vmid->id); @@ -156,17 +157,21 @@ void kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid) if (old_active_vmid != 0 && vmid_gen_match(vmid) && 0 != atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_vmids), old_active_vmid, vmid)) - return; + return false; raw_spin_lock_irqsave(&cpu_vmid_lock, flags); /* Check that our VMID belongs to the current generation. */ vmid = atomic64_read(&kvm_vmid->id); - if (!vmid_gen_match(vmid)) + if (!vmid_gen_match(vmid)) { vmid = new_vmid(kvm_vmid); + updated = true; + } atomic64_set(this_cpu_ptr(&active_vmids), vmid); raw_spin_unlock_irqrestore(&cpu_vmid_lock, flags); + + return updated; } /* -- cgit v1.2.3 From 27cde4c0fe28eab228d1fc9d84c68a109219887d Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Wed, 18 Oct 2023 23:32:11 +0000 Subject: KVM: arm64: Rename helpers for VHE vCPU load/put The names for the helpers we expose to the 'generic' KVM code are a bit imprecise; we switch the EL0 + EL1 sysreg context and setup trap controls that do not need to change for every guest entry/exit. Rename + shuffle things around a bit in preparation for loading the stage-2 MMU context on vcpu_load(). 
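
To make the end state concrete, the load/put pair after this patch, and
with the stage-2 load that the next patch in the series adds, looks
roughly like this sketch:

	void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
	{
		__vcpu_load_switch_sysregs(vcpu);
		__vcpu_load_activate_traps(vcpu);
		__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch); /* added by a later patch */
	}

	void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
	{
		__vcpu_put_deactivate_traps(vcpu);
		__vcpu_put_switch_sysregs(vcpu);
	}

Note that the put path undoes the load path in reverse order.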
Link: https://lore.kernel.org/r/20231018233212.2888027-5-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 4 ++-- arch/arm64/include/asm/kvm_hyp.h | 7 ++----- arch/arm64/kvm/arm.c | 4 ++-- arch/arm64/kvm/hyp/vhe/switch.c | 18 +++++++++++++++--- arch/arm64/kvm/hyp/vhe/sysreg-sr.c | 11 ++++------- 5 files changed, 25 insertions(+), 19 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index be0ab101c557..759adee42018 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -1109,8 +1109,8 @@ static inline bool kvm_set_pmuserenr(u64 val) } #endif -void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu); -void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu); +void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu); +void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu); int __init kvm_set_ipa_limit(void); diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 66efd67ea7e8..145ce73fc16c 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -93,6 +93,8 @@ void __timer_disable_traps(struct kvm_vcpu *vcpu); void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt); void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt); #else +void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu); +void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu); void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt); void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt); void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt); @@ -111,11 +113,6 @@ void __fpsimd_save_state(struct user_fpsimd_state *fp_regs); void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs); void __sve_restore_state(void *sve_pffr, u32 *fpsr); -#ifndef __KVM_NVHE_HYPERVISOR__ -void activate_traps_vhe_load(struct kvm_vcpu *vcpu); -void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu); -#endif - u64 __guest_enter(struct kvm_vcpu *vcpu); bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id); diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 3ab904adbd64..584be562b1d4 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -448,7 +448,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_vgic_load(vcpu); kvm_timer_vcpu_load(vcpu); if (has_vhe()) - kvm_vcpu_load_sysregs_vhe(vcpu); + kvm_vcpu_load_vhe(vcpu); kvm_arch_vcpu_load_fp(vcpu); kvm_vcpu_pmu_restore_guest(vcpu); if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) @@ -472,7 +472,7 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) kvm_arch_vcpu_put_debug_state_flags(vcpu); kvm_arch_vcpu_put_fp(vcpu); if (has_vhe()) - kvm_vcpu_put_sysregs_vhe(vcpu); + kvm_vcpu_put_vhe(vcpu); kvm_timer_vcpu_put(vcpu); kvm_vgic_put(vcpu); kvm_vcpu_pmu_restore_host(vcpu); diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c index 6537f58b1a8c..d05b6a08dcde 100644 --- a/arch/arm64/kvm/hyp/vhe/switch.c +++ b/arch/arm64/kvm/hyp/vhe/switch.c @@ -93,12 +93,12 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu) NOKPROBE_SYMBOL(__deactivate_traps); /* - * Disable IRQs in {activate,deactivate}_traps_vhe_{load,put}() to + * Disable IRQs in __vcpu_{load,put}_{activate,deactivate}_traps() to * prevent a race condition between context switching of PMUSERENR_EL0 * in __{activate,deactivate}_traps_common() and IPIs that attempts to * update PMUSERENR_EL0. See also kvm_set_pmuserenr(). 
 */
-void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
+static void __vcpu_load_activate_traps(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 
@@ -107,7 +107,7 @@ void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
 	local_irq_restore(flags);
 }
 
-void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
+static void __vcpu_put_deactivate_traps(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 
@@ -116,6 +116,18 @@ void deactivate_traps_vhe_put(struct kvm_vcpu *vcpu)
 	local_irq_restore(flags);
 }
 
+void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
+{
+	__vcpu_load_switch_sysregs(vcpu);
+	__vcpu_load_activate_traps(vcpu);
+}
+
+void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
+{
+	__vcpu_put_deactivate_traps(vcpu);
+	__vcpu_put_switch_sysregs(vcpu);
+}
+
 static const exit_handler_fn hyp_exit_handlers[] = {
 	[0 ... ESR_ELx_EC_MAX]		= NULL,
 	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
diff --git a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
index b35a178e7e0d..8e1e0d5033b6 100644
--- a/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/vhe/sysreg-sr.c
@@ -52,7 +52,7 @@ void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)
 NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
 
 /**
- * kvm_vcpu_load_sysregs_vhe - Load guest system registers to the physical CPU
+ * __vcpu_load_switch_sysregs - Load guest system registers to the physical CPU
  *
  * @vcpu: The VCPU pointer
  *
@@ -62,7 +62,7 @@ NOKPROBE_SYMBOL(sysreg_restore_guest_state_vhe);
  * and loading system register state early avoids having to load them on
  * every entry to the VM.
  */
-void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
+void __vcpu_load_switch_sysregs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;
@@ -92,12 +92,10 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
 	__sysreg_restore_el1_state(guest_ctxt);
 
 	vcpu_set_flag(vcpu, SYSREGS_ON_CPU);
-
-	activate_traps_vhe_load(vcpu);
 }
 
 /**
- * kvm_vcpu_put_sysregs_vhe - Restore host system registers to the physical CPU
+ * __vcpu_put_switch_sysregs - Restore host system registers to the physical CPU
  *
  * @vcpu: The VCPU pointer
  *
@@ -107,13 +105,12 @@ void kvm_vcpu_load_sysregs_vhe(struct kvm_vcpu *vcpu)
  * and deferring saving system register state until we're no longer running the
  * VCPU avoids having to save them on every exit from the VM.
  */
-void kvm_vcpu_put_sysregs_vhe(struct kvm_vcpu *vcpu)
+void __vcpu_put_switch_sysregs(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
 	struct kvm_cpu_context *host_ctxt;
 
 	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
-	deactivate_traps_vhe_put(vcpu);
 
 	__sysreg_save_el1_state(guest_ctxt);
 	__sysreg_save_user_state(guest_ctxt);
-- cgit v1.2.3

From 934bf871f0113e1e57cfa262dcbc9aad5fa0a252 Mon Sep 17 00:00:00 2001
From: Oliver Upton
Date: Wed, 18 Oct 2023 23:32:12 +0000
Subject: KVM: arm64: Load the stage-2 MMU context in kvm_vcpu_load_vhe()

To date the VHE code has aggressively reloaded the stage-2 MMU context
on every guest entry, despite the fact that this isn't necessary. This
was probably done for consistency with the nVHE code, which needs to
switch in/out the stage-2 MMU context as both the host and guest run at
EL1.

Hoist __load_stage2() into kvm_vcpu_load_vhe(), thus avoiding a reload
on every guest entry/exit. This is likely to be beneficial to systems
with one of the speculative AT errata, as there is now one fewer context
synchronization event on the guest entry path.
Additionally, it is possible that implementations have hitched
correctness mitigations on writes to VTTBR_EL2, which are now elided on
guest re-entry.

Note that __tlb_switch_to_guest() is deliberately left untouched as it
can be called outside the context of a running vCPU.

Link: https://lore.kernel.org/r/20231018233212.2888027-6-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---
 arch/arm64/kvm/hyp/vhe/switch.c | 15 +++++----------
 1 file changed, 5 insertions(+), 10 deletions(-)

diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index d05b6a08dcde..b0cafd7c5f8f 100644
--- a/arch/arm64/kvm/hyp/vhe/switch.c
+++ b/arch/arm64/kvm/hyp/vhe/switch.c
@@ -120,6 +120,7 @@ void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu)
 {
 	__vcpu_load_switch_sysregs(vcpu);
 	__vcpu_load_activate_traps(vcpu);
+	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
 }
 
 void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu)
@@ -182,17 +183,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
 	sysreg_save_host_state_vhe(host_ctxt);
 
 	/*
-	 * ARM erratum 1165522 requires us to configure both stage 1 and
-	 * stage 2 translation for the guest context before we clear
-	 * HCR_EL2.TGE.
-	 *
-	 * We have already configured the guest's stage 1 translation in
-	 * kvm_vcpu_load_sysregs_vhe above. We must now call
-	 * __load_stage2 before __activate_traps, because
-	 * __load_stage2 configures stage 2 translation, and
-	 * __activate_traps clear HCR_EL2.TGE (among other things).
+	 * Note that ARM erratum 1165522 requires us to configure both stage 1
+	 * and stage 2 translation for the guest context before we clear
+	 * HCR_EL2.TGE. The stage 1 and stage 2 guest context has already been
+	 * loaded on the CPU in kvm_vcpu_load_vhe().
 	 */
-	__load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
 	__activate_traps(vcpu);
 
 	__kvm_adjust_pc(vcpu);
-- cgit v1.2.3

From fe49fd940e22592988552e3bcd03f5a64facdecf Mon Sep 17 00:00:00 2001
From: Marc Zyngier
Date: Thu, 12 Oct 2023 21:51:08 +0100
Subject: KVM: arm64: Move VTCR_EL2 into struct s2_mmu

We currently have a global VTCR_EL2 value for each guest, even if the
guest uses NV. This implies that the guest's own S2 must fit in the
host's. This is odd, for multiple reasons:

- the PARange values and the number of IPA bits don't necessarily match:
  you can have 33 bits of IPA space, and yet you can only describe 32 or
  36 bits of PARange

- When userspace sets the IPA space, it creates a contract with the
  kernel saying "this is the IPA space I'm prepared to handle". At no
  point does it constrain the guest's own IPA space as long as the guest
  doesn't try to use a [I]PA outside of the IPA space set by userspace

- We don't even try to hide the value of ID_AA64MMFR0_EL1.PARange.

And then there is the consequence of the above: if a guest tries to
create an S2 that has for input address something that is larger than
the IPA space defined by the host, we inject a fatal exception.

This is no good. For all intents and purposes, a guest should be able to
have the S2 it really wants, as long as the *output* address of that S2
isn't outside of the IPA space.

For that, we need to have a per-s2_mmu VTCR_EL2 setting, which allows us
to represent the full PARange. Move the vtcr field into the s2_mmu
structure, which has no impact whatsoever, except for NV.

Note that once we are able to override ID_AA64MMFR0_EL1.PARange from
userspace, we'll also be able to restrict the size of the shadow S2 that
NV uses.
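
As an illustration of what the per-MMU field enables (a hypothetical
sketch, not part of this patch; guest_parange is an assumed input), a
shadow S2 could be sized from the PARange exposed to the guest
hypervisor using the same helpers kvm_init_stage2_mmu() already uses:

	/* Hypothetical: size a shadow S2 independently of kvm->arch.mmu */
	static u64 shadow_s2_vtcr(u8 guest_parange)
	{
		u32 phys_shift = id_aa64mmfr0_parange_to_phys_shift(guest_parange);
		u64 mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
		u64 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

		return kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
	}

With vtcr carried in struct kvm_s2_mmu, each shadow MMU can hold such a
value independently of the canonical MMU.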
Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231012205108.3937270-1-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 13 ++++++++++--- arch/arm64/include/asm/kvm_mmu.h | 8 ++++---- arch/arm64/include/asm/stage2_pgtable.h | 4 ++-- arch/arm64/kvm/hyp/nvhe/mem_protect.c | 8 ++++---- arch/arm64/kvm/hyp/nvhe/pkvm.c | 4 ++-- arch/arm64/kvm/hyp/pgtable.c | 2 +- arch/arm64/kvm/mmu.c | 13 +++++++------ arch/arm64/kvm/pkvm.c | 2 +- arch/arm64/kvm/vgic/vgic-kvm-device.c | 3 ++- 9 files changed, 33 insertions(+), 24 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 759adee42018..b6b10eb7543f 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -158,6 +158,16 @@ struct kvm_s2_mmu { phys_addr_t pgd_phys; struct kvm_pgtable *pgt; + /* + * VTCR value used on the host. For a non-NV guest (or a NV + * guest that runs in a context where its own S2 doesn't + * apply), its T0SZ value reflects that of the IPA size. + * + * For a shadow S2 MMU, T0SZ reflects the PARange exposed to + * the guest. + */ + u64 vtcr; + /* The last vcpu id that ran on each physical CPU */ int __percpu *last_vcpu_ran; @@ -205,9 +215,6 @@ struct kvm_protected_vm { struct kvm_arch { struct kvm_s2_mmu mmu; - /* VTCR_EL2 value for this VM */ - u64 vtcr; - /* Interrupt controller */ struct vgic_dist vgic; diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 96a80e8f6226..caa29c16ac50 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h @@ -150,9 +150,9 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v) */ #define KVM_PHYS_SHIFT (40) -#define kvm_phys_shift(kvm) VTCR_EL2_IPA(kvm->arch.vtcr) -#define kvm_phys_size(kvm) (_AC(1, ULL) << kvm_phys_shift(kvm)) -#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - _AC(1, ULL)) +#define kvm_phys_shift(mmu) VTCR_EL2_IPA((mmu)->vtcr) +#define kvm_phys_size(mmu) (_AC(1, ULL) << kvm_phys_shift(mmu)) +#define kvm_phys_mask(mmu) (kvm_phys_size(mmu) - _AC(1, ULL)) #include #include @@ -299,7 +299,7 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu) static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, struct kvm_arch *arch) { - write_sysreg(arch->vtcr, vtcr_el2); + write_sysreg(mmu->vtcr, vtcr_el2); write_sysreg(kvm_get_vttbr(mmu), vttbr_el2); /* diff --git a/arch/arm64/include/asm/stage2_pgtable.h b/arch/arm64/include/asm/stage2_pgtable.h index c8dca8ae359c..23d27623e478 100644 --- a/arch/arm64/include/asm/stage2_pgtable.h +++ b/arch/arm64/include/asm/stage2_pgtable.h @@ -21,13 +21,13 @@ * (IPA_SHIFT - 4). */ #define stage2_pgtable_levels(ipa) ARM64_HW_PGTABLE_LEVELS((ipa) - 4) -#define kvm_stage2_levels(kvm) VTCR_EL2_LVLS(kvm->arch.vtcr) +#define kvm_stage2_levels(mmu) VTCR_EL2_LVLS((mmu)->vtcr) /* * kvm_mmmu_cache_min_pages() is the number of pages required to install * a stage-2 translation. We pre-allocate the entry level page table at * the VM creation. 
*/ -#define kvm_mmu_cache_min_pages(kvm) (kvm_stage2_levels(kvm) - 1) +#define kvm_mmu_cache_min_pages(mmu) (kvm_stage2_levels(mmu) - 1) #endif /* __ARM64_S2_PGTABLE_H_ */ diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c index 9d703441278b..8d0a5834e883 100644 --- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c @@ -129,8 +129,8 @@ static void prepare_host_vtcr(void) parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val); phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange); - host_mmu.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val, - id_aa64mmfr1_el1_sys_val, phys_shift); + host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val, + id_aa64mmfr1_el1_sys_val, phys_shift); } static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot); @@ -235,7 +235,7 @@ int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd) unsigned long nr_pages; int ret; - nr_pages = kvm_pgtable_stage2_pgd_size(vm->kvm.arch.vtcr) >> PAGE_SHIFT; + nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT; ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0); if (ret) return ret; @@ -295,7 +295,7 @@ int __pkvm_prot_finalize(void) return -EPERM; params->vttbr = kvm_get_vttbr(mmu); - params->vtcr = host_mmu.arch.vtcr; + params->vtcr = mmu->vtcr; params->hcr_el2 |= HCR_VM; /* diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c index 8033ef353a5d..9d23a51d7f75 100644 --- a/arch/arm64/kvm/hyp/nvhe/pkvm.c +++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c @@ -303,7 +303,7 @@ static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm, { hyp_vm->host_kvm = host_kvm; hyp_vm->kvm.created_vcpus = nr_vcpus; - hyp_vm->kvm.arch.vtcr = host_mmu.arch.vtcr; + hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr; } static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu, @@ -483,7 +483,7 @@ int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva, } vm_size = pkvm_get_hyp_vm_size(nr_vcpus); - pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.vtcr); + pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr); ret = -ENOMEM; diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index f155b8c9e98c..0c84872fd89d 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1511,7 +1511,7 @@ int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_s2_mmu *mmu, kvm_pgtable_force_pte_cb_t force_pte_cb) { size_t pgd_sz; - u64 vtcr = mmu->arch->vtcr; + u64 vtcr = mmu->vtcr; u32 ia_bits = VTCR_EL2_IPA(vtcr); u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr); u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0; diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 482280fe22d7..551f21936eaa 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -892,7 +892,7 @@ int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long t mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); - kvm->arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift); + mmu->vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift); if (mmu->pgt != NULL) { kvm_err("kvm_arch already initialized?\n"); @@ -1067,7 +1067,8 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, phys_addr_t addr; int ret = 0; struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO }; - struct kvm_pgtable *pgt = kvm->arch.mmu.pgt; + struct kvm_s2_mmu *mmu = &kvm->arch.mmu; + struct kvm_pgtable 
*pgt = mmu->pgt; enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE | KVM_PGTABLE_PROT_R | (writable ? KVM_PGTABLE_PROT_W : 0); @@ -1080,7 +1081,7 @@ int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) { ret = kvm_mmu_topup_memory_cache(&cache, - kvm_mmu_cache_min_pages(kvm)); + kvm_mmu_cache_min_pages(mmu)); if (ret) break; @@ -1431,7 +1432,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, if (fault_status != ESR_ELx_FSC_PERM || (logging_active && write_fault)) { ret = kvm_mmu_topup_memory_cache(memcache, - kvm_mmu_cache_min_pages(kvm)); + kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu)); if (ret) return ret; } @@ -1747,7 +1748,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) } /* Userspace should not be able to register out-of-bounds IPAs */ - VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->kvm)); + VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu)); if (fault_status == ESR_ELx_FSC_ACCESS) { handle_access_fault(vcpu, fault_ipa); @@ -2021,7 +2022,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm, * Prevent userspace from creating a memory region outside of the IPA * space addressable by the KVM guest IPA space. */ - if ((new->base_gfn + new->npages) > (kvm_phys_size(kvm) >> PAGE_SHIFT)) + if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT)) return -EFAULT; hva = new->userspace_addr; diff --git a/arch/arm64/kvm/pkvm.c b/arch/arm64/kvm/pkvm.c index 6ff3ec18c925..8350fb8fee0b 100644 --- a/arch/arm64/kvm/pkvm.c +++ b/arch/arm64/kvm/pkvm.c @@ -123,7 +123,7 @@ static int __pkvm_create_hyp_vm(struct kvm *host_kvm) if (host_kvm->created_vcpus < 1) return -EINVAL; - pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.vtcr); + pgd_sz = kvm_pgtable_stage2_pgd_size(host_kvm->arch.mmu.vtcr); /* * The PGD pages will be reclaimed using a hyp_memcache which implies diff --git a/arch/arm64/kvm/vgic/vgic-kvm-device.c b/arch/arm64/kvm/vgic/vgic-kvm-device.c index 212b73a715c1..64f8e2e1c443 100644 --- a/arch/arm64/kvm/vgic/vgic-kvm-device.c +++ b/arch/arm64/kvm/vgic/vgic-kvm-device.c @@ -27,7 +27,8 @@ int vgic_check_iorange(struct kvm *kvm, phys_addr_t ioaddr, if (addr + size < addr) return -EINVAL; - if (addr & ~kvm_phys_mask(kvm) || addr + size > kvm_phys_size(kvm)) + if (addr & ~kvm_phys_mask(&kvm->arch.mmu) || + (addr + size) > kvm_phys_size(&kvm->arch.mmu)) return -E2BIG; return 0; -- cgit v1.2.3 From 1616ca6f3c10723c1b60ae44724212fae88f502d Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:41 +0000 Subject: KVM: arm64: PMU: Introduce helpers to set the guest's PMU Introduce new helper functions to set the guest's PMU (kvm->arch.arm_pmu) either to a default probed instance or to a caller requested one, and use it when the guest's PMU needs to be set. These helpers will make it easier for the following patches to modify the relevant code. No functional change intended. 
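
A sketch of the intended call sites (both appear in the diff below;
shown here only to make the split of responsibilities explicit):

	/* userspace picked a PMU via KVM_ARM_VCPU_PMU_V3_SET_PMU */
	kvm_arm_set_pmu(kvm, arm_pmu);

	/* no PMU chosen yet: fall back to the probed default */
	if (!kvm->arch.arm_pmu) {
		ret = kvm_arm_set_default_pmu(kvm);
		if (ret)
			return ret;
	}

Both paths funnel through kvm_arm_set_pmu(), which asserts that the
config_lock is held.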
Reviewed-by: Sebastian Ott Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-2-rananta@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/pmu-emul.c | 50 +++++++++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 6b066e04dc5d..fb9817bdfeb5 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -874,6 +874,36 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) return true; } +static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) +{ + lockdep_assert_held(&kvm->arch.config_lock); + + kvm->arch.arm_pmu = arm_pmu; +} + +/** + * kvm_arm_set_default_pmu - No PMU set, get the default one. + * @kvm: The kvm pointer + * + * The observant among you will notice that the supported_cpus + * mask does not get updated for the default PMU even though it + * is quite possible the selected instance supports only a + * subset of cores in the system. This is intentional, and + * upholds the preexisting behavior on heterogeneous systems + * where vCPUs can be scheduled on any core but the guest + * counters could stop working. + */ +static int kvm_arm_set_default_pmu(struct kvm *kvm) +{ + struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu(); + + if (!arm_pmu) + return -ENODEV; + + kvm_arm_set_pmu(kvm, arm_pmu); + return 0; +} + static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) { struct kvm *kvm = vcpu->kvm; @@ -893,7 +923,7 @@ static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id) break; } - kvm->arch.arm_pmu = arm_pmu; + kvm_arm_set_pmu(kvm, arm_pmu); cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus); ret = 0; break; @@ -917,20 +947,10 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) return -EBUSY; if (!kvm->arch.arm_pmu) { - /* - * No PMU set, get the default one. - * - * The observant among you will notice that the supported_cpus - * mask does not get updated for the default PMU even though it - * is quite possible the selected instance supports only a - * subset of cores in the system. This is intentional, and - * upholds the preexisting behavior on heterogeneous systems - * where vCPUs can be scheduled on any core but the guest - * counters could stop working. - */ - kvm->arch.arm_pmu = kvm_pmu_probe_armpmu(); - if (!kvm->arch.arm_pmu) - return -ENODEV; + int ret = kvm_arm_set_default_pmu(kvm); + + if (ret) + return ret; } switch (attr->attr) { -- cgit v1.2.3 From bc512d6a9b92390760f0e8c895501c5016539929 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 19 Oct 2023 18:56:17 +0000 Subject: KVM: arm64: Make PMEVTYPER_EL0.NSH RES0 if EL2 isn't advertised The NSH bit, which filters event counting at EL2, is required by the architecture if an implementation has EL2. Even though KVM doesn't support nested virt yet, it makes no effort to hide the existence of EL2 from the ID registers. Userspace can, however, change the value of PFR0 to hide EL2. Align KVM's sysreg emulation with the architecture and make NSH RES0 if EL2 isn't advertised. Keep in mind the bit is ignored when constructing the backing perf event. While at it, build the event type mask using explicit field definitions instead of relying on ARMV8_PMU_EVTYPE_MASK. 
KVM probably should've been doing this in the first place, as it avoids changes to the aforementioned mask affecting sysreg emulation. Reviewed-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20231019185618.3442949-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/pmu-emul.c | 21 ++++++++++++++------- arch/arm64/kvm/sys_regs.c | 8 ++++++-- include/kvm/arm_pmu.h | 5 +++++ 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 6b066e04dc5d..32d83db9674e 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -60,6 +60,18 @@ static u32 kvm_pmu_event_mask(struct kvm *kvm) return __kvm_pmu_event_mask(pmuver); } +u64 kvm_pmu_evtyper_mask(struct kvm *kvm) +{ + u64 mask = ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMU_EXCLUDE_EL0 | + kvm_pmu_event_mask(kvm); + u64 pfr0 = IDREG(kvm, SYS_ID_AA64PFR0_EL1); + + if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0)) + mask |= ARMV8_PMU_INCLUDE_EL2; + + return mask; +} + /** * kvm_pmc_is_64bit - determine if counter is 64bit * @pmc: counter context @@ -657,18 +669,13 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx) { struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx); - u64 reg, mask; + u64 reg; if (!kvm_vcpu_has_pmu(vcpu)) return; - mask = ARMV8_PMU_EVTYPE_MASK; - mask &= ~ARMV8_PMU_EVTYPE_EVENT; - mask |= kvm_pmu_event_mask(vcpu->kvm); - reg = counter_index_to_evtreg(pmc->idx); - - __vcpu_sys_reg(vcpu, reg) = data & mask; + __vcpu_sys_reg(vcpu, reg) = data & kvm_pmu_evtyper_mask(vcpu->kvm); kvm_pmu_create_perf_event(pmc); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e92ec810d449..78720c373904 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -746,8 +746,12 @@ static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { + /* This thing will UNDEF, who cares about the reset value? */ + if (!kvm_vcpu_has_pmu(vcpu)) + return 0; + reset_unknown(vcpu, r); - __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK; + __vcpu_sys_reg(vcpu, r->reg) &= kvm_pmu_evtyper_mask(vcpu->kvm); return __vcpu_sys_reg(vcpu, r->reg); } @@ -988,7 +992,7 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, kvm_pmu_set_counter_event_type(vcpu, p->regval, idx); kvm_vcpu_pmu_restore_guest(vcpu); } else { - p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK; + p->regval = __vcpu_sys_reg(vcpu, reg); } return true; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 31029f4f7be8..fd0aa8105a5b 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -101,6 +101,7 @@ void kvm_vcpu_pmu_resync_el0(void); }) u8 kvm_arm_pmu_get_pmuver_limit(void); +u64 kvm_pmu_evtyper_mask(struct kvm *kvm); #else struct kvm_pmu { @@ -172,6 +173,10 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void) { return 0; } +static inline u64 kvm_pmu_evtyper_mask(struct kvm *kvm) +{ + return 0; +} static inline void kvm_vcpu_pmu_resync_el0(void) {} #endif -- cgit v1.2.3 From ae8d3522e5b7208d238c893a00957864536983dc Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 19 Oct 2023 18:56:18 +0000 Subject: KVM: arm64: Add PMU event filter bits required if EL3 is implemented Suzuki noticed that KVM's PMU emulation is oblivious to the NSU and NSK event filter bits. On systems that have EL3 these bits modify the filter behavior in non-secure EL0 and EL1, respectively. 
Even though the kernel doesn't use these bits, it is entirely possible some other guest OS does. Additionally, it would appear that these and the M bit are required by the architecture if EL3 is implemented. Allow the EL3 event filter bits to be set if EL3 is advertised in the guest's ID register. Implement the behavior of NSU and NSK according to the pseudocode, and entirely ignore the M bit for perf event creation. Reported-by: Suzuki K Poulose Reviewed-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20231019185618.3442949-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/pmu-emul.c | 15 +++++++++++++-- include/linux/perf/arm_pmuv3.h | 9 ++++++--- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 32d83db9674e..e6be14e38cee 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -69,6 +69,11 @@ u64 kvm_pmu_evtyper_mask(struct kvm *kvm) if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL2, pfr0)) mask |= ARMV8_PMU_INCLUDE_EL2; + if (SYS_FIELD_GET(ID_AA64PFR0_EL1, EL3, pfr0)) + mask |= ARMV8_PMU_EXCLUDE_NS_EL0 | + ARMV8_PMU_EXCLUDE_NS_EL1 | + ARMV8_PMU_EXCLUDE_EL3; + return mask; } @@ -596,6 +601,7 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc) struct perf_event *event; struct perf_event_attr attr; u64 eventsel, reg, data; + bool p, u, nsk, nsu; reg = counter_index_to_evtreg(pmc->idx); data = __vcpu_sys_reg(vcpu, reg); @@ -622,13 +628,18 @@ static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc) !test_bit(eventsel, vcpu->kvm->arch.pmu_filter)) return; + p = data & ARMV8_PMU_EXCLUDE_EL1; + u = data & ARMV8_PMU_EXCLUDE_EL0; + nsk = data & ARMV8_PMU_EXCLUDE_NS_EL1; + nsu = data & ARMV8_PMU_EXCLUDE_NS_EL0; + memset(&attr, 0, sizeof(struct perf_event_attr)); attr.type = arm_pmu->pmu.type; attr.size = sizeof(attr); attr.pinned = 1; attr.disabled = !kvm_pmu_counter_is_enabled(pmc); - attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0; - attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0; + attr.exclude_user = (u != nsu); + attr.exclude_kernel = (p != nsk); attr.exclude_hv = 1; /* Don't count EL2 events */ attr.exclude_host = 1; /* Don't count host events */ attr.config = eventsel; diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index e3899bd77f5c..9c226adf938a 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -234,9 +234,12 @@ /* * Event filters for PMUv3 */ -#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) -#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) -#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) +#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) +#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) +#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29) +#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28) +#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) +#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26) /* * PMUSERENR: user enable reg -- cgit v1.2.3 From 427733579744ef22ee6d0da9907560d79d937458 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:42 +0000 Subject: KVM: arm64: Select default PMU in KVM_ARM_VCPU_INIT handler Future changes to KVM's sysreg emulation will rely on having a valid PMU instance to determine the number of implemented counters (PMCR_EL0.N). This is earlier than when userspace is expected to modify the vPMU device attributes, where the default is selected today. Select the default PMU when handling KVM_ARM_VCPU_INIT such that it is available in time for sysreg emulation. 
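
Concretely, with the PMU selected at init time, sysreg emulation can
derive the number of implemented counters from the chosen arm_pmu
instead of reading the hardware PMCR_EL0 (sketch; a helper of this
shape, kvm_arm_pmu_get_max_counters(), is added later in this series):

	/* arm_pmu->num_events includes the cycle counter; PMCR_EL0.N does not */
	u8 pmcr_n = kvm->arch.arm_pmu->num_events - 1;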
Reviewed-by: Sebastian Ott Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20231020214053.2144305-3-rananta@google.com [Oliver: rewrite changelog] Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 19 +++++++++++++++++++ arch/arm64/kvm/pmu-emul.c | 16 ++++------------ include/kvm/arm_pmu.h | 6 ++++++ 3 files changed, 29 insertions(+), 12 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 4866b3f7b4ea..0771c42d02e4 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -1229,6 +1229,21 @@ static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu, return !bitmap_equal(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); } +static int kvm_setup_vcpu(struct kvm_vcpu *vcpu) +{ + struct kvm *kvm = vcpu->kvm; + int ret = 0; + + /* + * When the vCPU has a PMU, but no PMU is set for the guest + * yet, set the default one. + */ + if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu) + ret = kvm_arm_set_default_pmu(kvm); + + return ret; +} + static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, const struct kvm_vcpu_init *init) { @@ -1244,6 +1259,10 @@ static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, bitmap_copy(vcpu->arch.features, &features, KVM_VCPU_MAX_FEATURES); + ret = kvm_setup_vcpu(vcpu); + if (ret) + goto out_unlock; + /* Now we know what it is, we can reset it. */ ret = kvm_reset_vcpu(vcpu); if (ret) { diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index fb9817bdfeb5..1eb02765dd14 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -717,10 +717,9 @@ static struct arm_pmu *kvm_pmu_probe_armpmu(void) * It is still necessary to get a valid cpu, though, to probe for the * default PMU instance as userspace is not required to specify a PMU * type. In order to uphold the preexisting behavior KVM selects the - * PMU instance for the core where the first call to the - * KVM_ARM_VCPU_PMU_V3_CTRL attribute group occurs. A dependent use case - * would be a user with disdain of all things big.LITTLE that affines - * the VMM to a particular cluster of cores. + * PMU instance for the core during vcpu init. A dependent use + * case would be a user with disdain of all things big.LITTLE that + * affines the VMM to a particular cluster of cores. * * In any case, userspace should just do the sane thing and use the UAPI * to select a PMU type directly. But, be wary of the baggage being @@ -893,7 +892,7 @@ static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) * where vCPUs can be scheduled on any core but the guest * counters could stop working. 
*/ -static int kvm_arm_set_default_pmu(struct kvm *kvm) +int kvm_arm_set_default_pmu(struct kvm *kvm) { struct arm_pmu *arm_pmu = kvm_pmu_probe_armpmu(); @@ -946,13 +945,6 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) if (vcpu->arch.pmu.created) return -EBUSY; - if (!kvm->arch.arm_pmu) { - int ret = kvm_arm_set_default_pmu(kvm); - - if (ret) - return ret; - } - switch (attr->attr) { case KVM_ARM_VCPU_PMU_V3_IRQ: { int __user *uaddr = (int __user *)(long)attr->addr; diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 31029f4f7be8..b80c75d80886 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -101,6 +101,7 @@ void kvm_vcpu_pmu_resync_el0(void); }) u8 kvm_arm_pmu_get_pmuver_limit(void); +int kvm_arm_set_default_pmu(struct kvm *kvm); #else struct kvm_pmu { @@ -174,6 +175,11 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void) } static inline void kvm_vcpu_pmu_resync_el0(void) {} +static inline int kvm_arm_set_default_pmu(struct kvm *kvm) +{ + return -ENODEV; +} + #endif #endif -- cgit v1.2.3 From 57fc267f1b5caa56dfb46f60e20e673cbc4cc4a8 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:43 +0000 Subject: KVM: arm64: PMU: Add a helper to read a vCPU's PMCR_EL0 Add a helper to read a vCPU's PMCR_EL0, and use it whenever KVM reads a vCPU's PMCR_EL0. Currently, the PMCR_EL0 value is tracked per vCPU. The following patches will make (only) PMCR_EL0.N track per guest. Having the new helper will be useful to combine the PMCR_EL0.N field (tracked per guest) and the other fields (tracked per vCPU) to provide the value of PMCR_EL0. No functional change intended. Reviewed-by: Sebastian Ott Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Reviewed-by: Eric Auger Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-4-rananta@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 3 +-- arch/arm64/kvm/pmu-emul.c | 21 +++++++++++++++------ arch/arm64/kvm/sys_regs.c | 6 +++--- include/kvm/arm_pmu.h | 6 ++++++ 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 0771c42d02e4..624e62b2345a 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -801,8 +801,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) } if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu)) - kvm_pmu_handle_pmcr(vcpu, - __vcpu_sys_reg(vcpu, PMCR_EL0)); + kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu)); if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu)) kvm_vcpu_pmu_restore_guest(vcpu); diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 1eb02765dd14..6337ba6f7941 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -72,7 +72,7 @@ static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc) static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc) { - u64 val = __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), PMCR_EL0); + u64 val = kvm_vcpu_read_pmcr(kvm_pmc_to_vcpu(pmc)); return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) || (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC)); @@ -250,7 +250,7 @@ void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu) { - u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT; + u64 val = kvm_vcpu_read_pmcr(vcpu) >> ARMV8_PMU_PMCR_N_SHIFT; val &= ARMV8_PMU_PMCR_N_MASK; if (val == 0) @@ -272,7 +272,7 @@ void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) if 
(!kvm_vcpu_has_pmu(vcpu)) return; - if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val) + if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) || !val) return; for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) { @@ -324,7 +324,7 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu) { u64 reg = 0; - if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) { + if ((kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) { reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0); reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0); reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1); @@ -426,7 +426,7 @@ static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu, { int i; - if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) + if (!(kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E)) return; /* Weed out disabled counters */ @@ -569,7 +569,7 @@ void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc) { struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); - return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) && + return (kvm_vcpu_read_pmcr(vcpu) & ARMV8_PMU_PMCR_E) && (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx)); } @@ -1084,3 +1084,12 @@ u8 kvm_arm_pmu_get_pmuver_limit(void) ID_AA64DFR0_EL1_PMUVer_V3P5); return FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), tmp); } + +/** + * kvm_vcpu_read_pmcr - Read PMCR_EL0 register for the vCPU + * @vcpu: The vcpu pointer + */ +u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) +{ + return __vcpu_sys_reg(vcpu, PMCR_EL0); +} diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e92ec810d449..d490cd903e2b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -822,7 +822,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, * Only update writeable bits of PMCR (continuing into * kvm_pmu_handle_pmcr() as well) */ - val = __vcpu_sys_reg(vcpu, PMCR_EL0); + val = kvm_vcpu_read_pmcr(vcpu); val &= ~ARMV8_PMU_PMCR_MASK; val |= p->regval & ARMV8_PMU_PMCR_MASK; if (!kvm_supports_32bit_el0()) @@ -830,7 +830,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, kvm_pmu_handle_pmcr(vcpu, val); } else { /* PMCR.P & PMCR.C are RAZ */ - val = __vcpu_sys_reg(vcpu, PMCR_EL0) + val = kvm_vcpu_read_pmcr(vcpu) & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C); p->regval = val; } @@ -879,7 +879,7 @@ static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx) { u64 pmcr, val; - pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0); + pmcr = kvm_vcpu_read_pmcr(vcpu); val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) { kvm_inject_undefined(vcpu); diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index b80c75d80886..c6a5e49b850a 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -103,6 +103,7 @@ void kvm_vcpu_pmu_resync_el0(void); u8 kvm_arm_pmu_get_pmuver_limit(void); int kvm_arm_set_default_pmu(struct kvm *kvm); +u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu); #else struct kvm_pmu { }; @@ -180,6 +181,11 @@ static inline int kvm_arm_set_default_pmu(struct kvm *kvm) return -ENODEV; } +static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) +{ + return 0; +} + #endif #endif -- cgit v1.2.3 From 4d20debf9ca160720a0b01ba4f2dc3d62296c4d1 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:44 +0000 Subject: KVM: arm64: PMU: Set PMCR_EL0.N for vCPU based on the associated PMU The number of PMU event counters is indicated in PMCR_EL0.N. 
For a vCPU with PMUv3 configured, the value is set to the same value as the current PE on every vCPU reset. Unless the vCPU is pinned to PEs that have the PMU associated to the guest from the initial vCPU reset, the value might be different from the PMU's PMCR_EL0.N on heterogeneous PMU systems. Fix this by setting the vCPU's PMCR_EL0.N to the PMU's PMCR_EL0.N value. Track the PMCR_EL0.N per guest, as only one PMU can be set for the guest (PMCR_EL0.N must be the same for all vCPUs of the guest), and it is convenient for updating the value. To achieve this, the patch introduces a helper, kvm_arm_pmu_get_max_counters(), that reads the maximum number of counters from the arm_pmu associated to the VM. Make the function global, as upcoming patches will need to know the value when userspace sets the guest's PMCR.N. KVM does not yet support userspace modifying PMCR_EL0.N. The following patch will add support for that. Reviewed-by: Sebastian Ott Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20231020214053.2144305-5-rananta@google.com Signed-off-by: Oliver Upton --- arch/arm64/include/asm/kvm_host.h | 3 +++ arch/arm64/kvm/pmu-emul.c | 21 ++++++++++++++++++++- arch/arm64/kvm/sys_regs.c | 28 ++++++++++++++-------------- include/kvm/arm_pmu.h | 6 ++++++ 4 files changed, 43 insertions(+), 15 deletions(-) diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index af06ccb7ee34..0fe9cc9dc836 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -257,6 +257,9 @@ struct kvm_arch { cpumask_var_t supported_cpus; + /* PMCR_EL0.N value for the guest */ + u8 pmcr_n; + /* Hypercall features firmware registers' descriptor */ struct kvm_smccc_features smccc_feat; struct maple_tree smccc_filter; diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 6337ba6f7941..6d4763d7308f 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -873,11 +873,27 @@ static bool pmu_irq_is_valid(struct kvm *kvm, int irq) return true; } +/** + * kvm_arm_pmu_get_max_counters - Return the max number of PMU counters. + * @kvm: The kvm pointer + */ +u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) +{ + struct arm_pmu *arm_pmu = kvm->arch.arm_pmu; + + /* + * The arm_pmu->num_events considers the cycle counter as well. + * Ignore that and return only the general-purpose counters. + */ + return arm_pmu->num_events - 1; +} + static void kvm_arm_set_pmu(struct kvm *kvm, struct arm_pmu *arm_pmu) { lockdep_assert_held(&kvm->arch.config_lock); kvm->arch.arm_pmu = arm_pmu; + kvm->arch.pmcr_n = kvm_arm_pmu_get_max_counters(kvm); } /** @@ -1091,5 +1107,8 @@ u8 kvm_arm_pmu_get_pmuver_limit(void) */ u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) { - return __vcpu_sys_reg(vcpu, PMCR_EL0); + u64 pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0) & + ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); + + return pmcr | ((u64)vcpu->kvm->arch.pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); } diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index d490cd903e2b..6dcfac15fe49 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -721,12 +721,7 @@ static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX); - /* No PMU available, any PMU reg may UNDEF...
*/ - if (!kvm_arm_support_pmu_v3()) - return 0; - - n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT; - n &= ARMV8_PMU_PMCR_N_MASK; + n = vcpu->kvm->arch.pmcr_n; if (n) mask |= GENMASK(n - 1, 0); @@ -762,17 +757,15 @@ static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { - u64 pmcr; + u64 pmcr = 0; - /* No PMU available, PMCR_EL0 may UNDEF... */ - if (!kvm_arm_support_pmu_v3()) - return 0; - - /* Only preserve PMCR_EL0.N, and reset the rest to 0 */ - pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); if (!kvm_supports_32bit_el0()) pmcr |= ARMV8_PMU_PMCR_LC; + /* + * The value of the PMCR.N field is included when the + * vCPU register is read via kvm_vcpu_read_pmcr(). + */ __vcpu_sys_reg(vcpu, r->reg) = pmcr; return __vcpu_sys_reg(vcpu, r->reg); @@ -1103,6 +1096,13 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 *val) +{ + *val = kvm_vcpu_read_pmcr(vcpu); + return 0; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ @@ -2167,7 +2167,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_SVCR), undef_access }, { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, - .reset = reset_pmcr, .reg = PMCR_EL0 }, + .reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr }, { PMU_SYS_REG(PMCNTENSET_EL0), .access = access_pmcnten, .reg = PMCNTENSET_EL0 }, { PMU_SYS_REG(PMCNTENCLR_EL0), diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index c6a5e49b850a..96e15f4bb18c 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -102,6 +102,7 @@ void kvm_vcpu_pmu_resync_el0(void); u8 kvm_arm_pmu_get_pmuver_limit(void); int kvm_arm_set_default_pmu(struct kvm *kvm); +u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm); u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu); #else @@ -181,6 +182,11 @@ static inline int kvm_arm_set_default_pmu(struct kvm *kvm) return -ENODEV; } +static inline u8 kvm_arm_pmu_get_max_counters(struct kvm *kvm) +{ + return 0; +} + static inline u64 kvm_vcpu_read_pmcr(struct kvm_vcpu *vcpu) { return 0; -- cgit v1.2.3 From a45f41d754e0b37de4b7dc1fb3c6b7a1285882fc Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:45 +0000 Subject: KVM: arm64: Add {get,set}_user for PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR} For unimplemented counters, the bits in PM{C,I}NTEN{SET,CLR} and PMOVS{SET,CLR} registers are expected to be RAZ. To honor this, explicitly implement the {get,set}_user functions for these registers to mask out unimplemented counters for userspace reads and writes.
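For intuition, the mask these {get,set}_user handlers apply has the same shape as kvm_pmu_valid_counter_mask(): the low N bits for the general-purpose counters plus bit 31 for the cycle counter. A minimal standalone sketch of that computation (names and values here are illustrative, not KVM code):

#include <stdint.h>
#include <stdio.h>

/* Assumed mask shape: bits [N-1:0] plus bit 31 (cycle counter) */
static uint64_t valid_counter_mask(unsigned int n)
{
	uint64_t mask = 1ULL << 31;

	if (n)
		mask |= (1ULL << n) - 1;
	return mask;
}

int main(void)
{
	/* With N = 6, a userspace write of all-ones only latches these bits */
	uint64_t kept = ~0ULL & valid_counter_mask(6);

	printf("kept = 0x%llx\n", (unsigned long long)kept);	/* 0x8000003f */
	return 0;
}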
Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20231020214053.2144305-6-rananta@google.com [Oliver: drop unnecessary locking] Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 51 +++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 6dcfac15fe49..319e8423f899 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -987,6 +987,39 @@ static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p, return true; } +static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val) +{ + bool set; + + val &= kvm_pmu_valid_counter_mask(vcpu); + + switch (r->reg) { + case PMOVSSET_EL0: + /* CRm[1] being set indicates a SET register, and CLR otherwise */ + set = r->CRm & 2; + break; + default: + /* Op2[0] being set indicates a SET register, and CLR otherwise */ + set = r->Op2 & 1; + break; + } + + if (set) + __vcpu_sys_reg(vcpu, r->reg) |= val; + else + __vcpu_sys_reg(vcpu, r->reg) &= ~val; + + return 0; +} + +static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val) +{ + u64 mask = kvm_pmu_valid_counter_mask(vcpu); + + *val = __vcpu_sys_reg(vcpu, r->reg) & mask; + return 0; +} + static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { @@ -2116,9 +2149,11 @@ static const struct sys_reg_desc sys_reg_descs[] = { /* PMBIDR_EL1 is not trapped */ { PMU_SYS_REG(PMINTENSET_EL1), - .access = access_pminten, .reg = PMINTENSET_EL1 }, + .access = access_pminten, .reg = PMINTENSET_EL1, + .get_user = get_pmreg, .set_user = set_pmreg }, { PMU_SYS_REG(PMINTENCLR_EL1), - .access = access_pminten, .reg = PMINTENSET_EL1 }, + .access = access_pminten, .reg = PMINTENSET_EL1, + .get_user = get_pmreg, .set_user = set_pmreg }, { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi }, { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 }, @@ -2169,11 +2204,14 @@ static const struct sys_reg_desc sys_reg_descs[] = { { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr }, { PMU_SYS_REG(PMCNTENSET_EL0), - .access = access_pmcnten, .reg = PMCNTENSET_EL0 }, + .access = access_pmcnten, .reg = PMCNTENSET_EL0, + .get_user = get_pmreg, .set_user = set_pmreg }, { PMU_SYS_REG(PMCNTENCLR_EL0), - .access = access_pmcnten, .reg = PMCNTENSET_EL0 }, + .access = access_pmcnten, .reg = PMCNTENSET_EL0, + .get_user = get_pmreg, .set_user = set_pmreg }, { PMU_SYS_REG(PMOVSCLR_EL0), - .access = access_pmovs, .reg = PMOVSSET_EL0 }, + .access = access_pmovs, .reg = PMOVSSET_EL0, + .get_user = get_pmreg, .set_user = set_pmreg }, /* * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was * previously (and pointlessly) advertised in the past... 
@@ -2201,7 +2239,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr, .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 }, { PMU_SYS_REG(PMOVSSET_EL0), - .access = access_pmovs, .reg = PMOVSSET_EL0 }, + .access = access_pmovs, .reg = PMOVSSET_EL0, + .get_user = get_pmreg, .set_user = set_pmreg }, { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 }, { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 }, -- cgit v1.2.3 From 27131b199f9fdc0e15baa0ff9d1695b54a96e39c Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:46 +0000 Subject: KVM: arm64: Sanitize PM{C,I}NTEN{SET,CLR}, PMOVS{SET,CLR} before first run For unimplemented counters, the registers PM{C,I}NTEN{SET,CLR} and PMOVS{SET,CLR} are expected to have the corresponding bits RAZ. Hence, to keep KVM's PMU emulation correct, mask out the RES0 bits. Defer this work to the point that userspace can no longer change the number of advertised PMCs. Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-7-rananta@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/arm.c | 2 +- arch/arm64/kvm/pmu-emul.c | 11 +++++++++++ include/kvm/arm_pmu.h | 3 ++- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 624e62b2345a..f6307afc5036 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -801,7 +801,7 @@ static int check_vcpu_requests(struct kvm_vcpu *vcpu) } if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu)) - kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu)); + kvm_vcpu_reload_pmu(vcpu); if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu)) kvm_vcpu_pmu_restore_guest(vcpu); diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c index 6d4763d7308f..6b271c542409 100644 --- a/arch/arm64/kvm/pmu-emul.c +++ b/arch/arm64/kvm/pmu-emul.c @@ -785,6 +785,17 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) return val & mask; } +void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) +{ + u64 mask = kvm_pmu_valid_counter_mask(vcpu); + + kvm_pmu_handle_pmcr(vcpu, kvm_vcpu_read_pmcr(vcpu)); + + __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= mask; + __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= mask; + __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= mask; +} + int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu) { if (!kvm_vcpu_has_pmu(vcpu)) diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h index 96e15f4bb18c..abb1e8215ca8 100644 --- a/include/kvm/arm_pmu.h +++ b/include/kvm/arm_pmu.h @@ -13,7 +13,6 @@ #define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1) #if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM) - struct kvm_pmc { u8 idx; /* index into the pmu->pmc array */ struct perf_event *perf_event; @@ -63,6 +62,7 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val); void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data, u64 select_idx); +void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu); int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, @@ -171,6 +171,7 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1) static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {} static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{} +static inline void kvm_vcpu_reload_pmu(struct kvm_vcpu *vcpu) {} static inline u8 kvm_arm_pmu_get_pmuver_limit(void) { return 0; -- cgit v1.2.3 From ea9ca904d24ff15ded92fd76c16462c47bcae2f8 Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:47 +0000 Subject: KVM: arm64: PMU: Allow userspace to limit PMCR_EL0.N for the guest KVM does not yet support userspace modifying PMCR_EL0.N (with the previous patch, KVM ignores what is written by userspace). Add support for userspace to limit PMCR_EL0.N. Disallow userspace from setting PMCR_EL0.N to a value that is greater than the host value, as KVM doesn't support more event counters than what the host HW implements. Also, make this register immutable after the VM has started running. To maintain the existing expectations, instead of returning an error, KVM returns success for these two cases. Finally, ignore writes to read-only bits that are cleared on vCPU reset, and RES{0,1} bits (including writable bits that KVM doesn't support yet), as those bits shouldn't be modified (at least with the current KVM). Co-developed-by: Marc Zyngier Signed-off-by: Marc Zyngier Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Link: https://lore.kernel.org/r/20231020214053.2144305-8-rananta@google.com Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 46 ++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 42 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 319e8423f899..9a89064a8c1a 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -719,9 +719,9 @@ static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu, static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r) { - u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX); + u64 mask = BIT(ARMV8_PMU_CYCLE_IDX); + u8 n = vcpu->kvm->arch.pmcr_n; - n = vcpu->kvm->arch.pmcr_n; if (n) mask |= GENMASK(n - 1, 0); @@ -1136,6 +1136,44 @@ static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, return 0; } +static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, + u64 val) +{ + u8 new_n = (val >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; + struct kvm *kvm = vcpu->kvm; + + mutex_lock(&kvm->arch.config_lock); + + /* + * The vCPU can't have more counters than the PMU hardware + * implements. Ignore this error to maintain compatibility + * with the existing KVM behavior. + */ + if (!kvm_vm_has_ran_once(kvm) && + new_n <= kvm_arm_pmu_get_max_counters(kvm)) + kvm->arch.pmcr_n = new_n; + + mutex_unlock(&kvm->arch.config_lock); + + /* + * Ignore writes to RES0 bits, read-only bits that are cleared on + * vCPU reset, and writable bits that KVM doesn't support yet. + * (i.e. only PMCR.N and bits [7:0] are mutable from userspace) + * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU. + * But, we leave the bit as it is here, as the vCPU's PMUver might + * be changed later (NOTE: the bit will be cleared on first vCPU run + * if necessary).
+ */ + val &= ARMV8_PMU_PMCR_MASK; + + /* The LC bit is RES1 when AArch32 is not supported */ + if (!kvm_supports_32bit_el0()) + val |= ARMV8_PMU_PMCR_LC; + + __vcpu_sys_reg(vcpu, r->reg) = val; + return 0; +} + /* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */ #define DBG_BCR_BVR_WCR_WVR_EL1(n) \ { SYS_DESC(SYS_DBGBVRn_EL1(n)), \ @@ -2239,8 +2239,8 @@ static const struct sys_reg_desc sys_reg_descs[] = { { SYS_DESC(SYS_CTR_EL0), access_ctr }, { SYS_DESC(SYS_SVCR), undef_access }, - { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, - .reset = reset_pmcr, .reg = PMCR_EL0, .get_user = get_pmcr }, + { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr, + .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr }, { PMU_SYS_REG(PMCNTENSET_EL0), .access = access_pmcnten, .reg = PMCNTENSET_EL0, .get_user = get_pmreg, .set_user = set_pmreg }, -- cgit v1.2.3 From 9f4b3273dfbe0ecb628b65fe9a80aae17caba20f Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:48 +0000 Subject: tools: Import arm_pmuv3.h Import kernel's include/linux/perf/arm_pmuv3.h, with the definition of PMEVN_SWITCH() additionally including an assert() for the 'default' case. The following patches will use macros defined in this header. Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-9-rananta@google.com Signed-off-by: Oliver Upton --- tools/include/perf/arm_pmuv3.h | 308 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 308 insertions(+) create mode 100644 tools/include/perf/arm_pmuv3.h diff --git a/tools/include/perf/arm_pmuv3.h b/tools/include/perf/arm_pmuv3.h new file mode 100644 index 000000000000..e822d49fb5b8 --- /dev/null +++ b/tools/include/perf/arm_pmuv3.h @@ -0,0 +1,308 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2012 ARM Ltd. + */ + +#ifndef __PERF_ARM_PMUV3_H +#define __PERF_ARM_PMUV3_H + +#include <assert.h> +#include <asm/bug.h> + +#define ARMV8_PMU_MAX_COUNTERS 32 +#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) + +/* + * Common architectural and microarchitectural event numbers.
+ */ +#define ARMV8_PMUV3_PERFCTR_SW_INCR 0x0000 +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL 0x0001 +#define ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL 0x0002 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL 0x0003 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE 0x0004 +#define ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL 0x0005 +#define ARMV8_PMUV3_PERFCTR_LD_RETIRED 0x0006 +#define ARMV8_PMUV3_PERFCTR_ST_RETIRED 0x0007 +#define ARMV8_PMUV3_PERFCTR_INST_RETIRED 0x0008 +#define ARMV8_PMUV3_PERFCTR_EXC_TAKEN 0x0009 +#define ARMV8_PMUV3_PERFCTR_EXC_RETURN 0x000A +#define ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED 0x000B +#define ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED 0x000C +#define ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED 0x000D +#define ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED 0x000E +#define ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED 0x000F +#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED 0x0010 +#define ARMV8_PMUV3_PERFCTR_CPU_CYCLES 0x0011 +#define ARMV8_PMUV3_PERFCTR_BR_PRED 0x0012 +#define ARMV8_PMUV3_PERFCTR_MEM_ACCESS 0x0013 +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE 0x0014 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB 0x0015 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE 0x0016 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL 0x0017 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB 0x0018 +#define ARMV8_PMUV3_PERFCTR_BUS_ACCESS 0x0019 +#define ARMV8_PMUV3_PERFCTR_MEMORY_ERROR 0x001A +#define ARMV8_PMUV3_PERFCTR_INST_SPEC 0x001B +#define ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED 0x001C +#define ARMV8_PMUV3_PERFCTR_BUS_CYCLES 0x001D +#define ARMV8_PMUV3_PERFCTR_CHAIN 0x001E +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE 0x001F +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE 0x0020 +#define ARMV8_PMUV3_PERFCTR_BR_RETIRED 0x0021 +#define ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED 0x0022 +#define ARMV8_PMUV3_PERFCTR_STALL_FRONTEND 0x0023 +#define ARMV8_PMUV3_PERFCTR_STALL_BACKEND 0x0024 +#define ARMV8_PMUV3_PERFCTR_L1D_TLB 0x0025 +#define ARMV8_PMUV3_PERFCTR_L1I_TLB 0x0026 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE 0x0027 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL 0x0028 +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE 0x0029 +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL 0x002A +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE 0x002B +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB 0x002C +#define ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL 0x002D +#define ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL 0x002E +#define ARMV8_PMUV3_PERFCTR_L2D_TLB 0x002F +#define ARMV8_PMUV3_PERFCTR_L2I_TLB 0x0030 +#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS 0x0031 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE 0x0032 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS 0x0033 +#define ARMV8_PMUV3_PERFCTR_DTLB_WALK 0x0034 +#define ARMV8_PMUV3_PERFCTR_ITLB_WALK 0x0035 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_RD 0x0036 +#define ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD 0x0037 +#define ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD 0x0038 +#define ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD 0x0039 +#define ARMV8_PMUV3_PERFCTR_OP_RETIRED 0x003A +#define ARMV8_PMUV3_PERFCTR_OP_SPEC 0x003B +#define ARMV8_PMUV3_PERFCTR_STALL 0x003C +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND 0x003D +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND 0x003E +#define ARMV8_PMUV3_PERFCTR_STALL_SLOT 0x003F + +/* Statistical profiling extension microarchitectural events */ +#define ARMV8_SPE_PERFCTR_SAMPLE_POP 0x4000 +#define ARMV8_SPE_PERFCTR_SAMPLE_FEED 0x4001 +#define ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE 0x4002 +#define ARMV8_SPE_PERFCTR_SAMPLE_COLLISION 0x4003 + +/* AMUv1 architecture events */ +#define ARMV8_AMU_PERFCTR_CNT_CYCLES 0x4004 +#define ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM 0x4005 + +/* 
long-latency read miss events */ +#define ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS 0x4006 +#define ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD 0x4009 +#define ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS 0x400A +#define ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD 0x400B + +/* Trace buffer events */ +#define ARMV8_PMUV3_PERFCTR_TRB_WRAP 0x400C +#define ARMV8_PMUV3_PERFCTR_TRB_TRIG 0x400E + +/* Trace unit events */ +#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT0 0x4010 +#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT1 0x4011 +#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT2 0x4012 +#define ARMV8_PMUV3_PERFCTR_TRCEXTOUT3 0x4013 +#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4 0x4018 +#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5 0x4019 +#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6 0x401A +#define ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7 0x401B + +/* additional latency from alignment events */ +#define ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT 0x4020 +#define ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT 0x4021 +#define ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT 0x4022 + +/* Armv8.5 Memory Tagging Extension events */ +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED 0x4024 +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD 0x4025 +#define ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR 0x4026 + +/* ARMv8 recommended implementation defined event types */ +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD 0x0040 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR 0x0041 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD 0x0042 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR 0x0043 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_INNER 0x0044 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_OUTER 0x0045 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_VICTIM 0x0046 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WB_CLEAN 0x0047 +#define ARMV8_IMPDEF_PERFCTR_L1D_CACHE_INVAL 0x0048 + +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD 0x004C +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR 0x004D +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD 0x004E +#define ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR 0x004F +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_RD 0x0050 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WR 0x0051 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_RD 0x0052 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_REFILL_WR 0x0053 + +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_VICTIM 0x0056 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_WB_CLEAN 0x0057 +#define ARMV8_IMPDEF_PERFCTR_L2D_CACHE_INVAL 0x0058 + +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_RD 0x005C +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_REFILL_WR 0x005D +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_RD 0x005E +#define ARMV8_IMPDEF_PERFCTR_L2D_TLB_WR 0x005F +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD 0x0060 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR 0x0061 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_SHARED 0x0062 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NOT_SHARED 0x0063 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_NORMAL 0x0064 +#define ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_PERIPH 0x0065 +#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_RD 0x0066 +#define ARMV8_IMPDEF_PERFCTR_MEM_ACCESS_WR 0x0067 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LD_SPEC 0x0068 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_ST_SPEC 0x0069 +#define ARMV8_IMPDEF_PERFCTR_UNALIGNED_LDST_SPEC 0x006A + +#define ARMV8_IMPDEF_PERFCTR_LDREX_SPEC 0x006C +#define ARMV8_IMPDEF_PERFCTR_STREX_PASS_SPEC 0x006D +#define ARMV8_IMPDEF_PERFCTR_STREX_FAIL_SPEC 0x006E +#define ARMV8_IMPDEF_PERFCTR_STREX_SPEC 0x006F +#define ARMV8_IMPDEF_PERFCTR_LD_SPEC 0x0070 +#define ARMV8_IMPDEF_PERFCTR_ST_SPEC 0x0071 +#define ARMV8_IMPDEF_PERFCTR_LDST_SPEC 0x0072 +#define ARMV8_IMPDEF_PERFCTR_DP_SPEC 0x0073 +#define 
ARMV8_IMPDEF_PERFCTR_ASE_SPEC 0x0074 +#define ARMV8_IMPDEF_PERFCTR_VFP_SPEC 0x0075 +#define ARMV8_IMPDEF_PERFCTR_PC_WRITE_SPEC 0x0076 +#define ARMV8_IMPDEF_PERFCTR_CRYPTO_SPEC 0x0077 +#define ARMV8_IMPDEF_PERFCTR_BR_IMMED_SPEC 0x0078 +#define ARMV8_IMPDEF_PERFCTR_BR_RETURN_SPEC 0x0079 +#define ARMV8_IMPDEF_PERFCTR_BR_INDIRECT_SPEC 0x007A + +#define ARMV8_IMPDEF_PERFCTR_ISB_SPEC 0x007C +#define ARMV8_IMPDEF_PERFCTR_DSB_SPEC 0x007D +#define ARMV8_IMPDEF_PERFCTR_DMB_SPEC 0x007E + +#define ARMV8_IMPDEF_PERFCTR_EXC_UNDEF 0x0081 +#define ARMV8_IMPDEF_PERFCTR_EXC_SVC 0x0082 +#define ARMV8_IMPDEF_PERFCTR_EXC_PABORT 0x0083 +#define ARMV8_IMPDEF_PERFCTR_EXC_DABORT 0x0084 + +#define ARMV8_IMPDEF_PERFCTR_EXC_IRQ 0x0086 +#define ARMV8_IMPDEF_PERFCTR_EXC_FIQ 0x0087 +#define ARMV8_IMPDEF_PERFCTR_EXC_SMC 0x0088 + +#define ARMV8_IMPDEF_PERFCTR_EXC_HVC 0x008A +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_PABORT 0x008B +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_DABORT 0x008C +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_OTHER 0x008D +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_IRQ 0x008E +#define ARMV8_IMPDEF_PERFCTR_EXC_TRAP_FIQ 0x008F +#define ARMV8_IMPDEF_PERFCTR_RC_LD_SPEC 0x0090 +#define ARMV8_IMPDEF_PERFCTR_RC_ST_SPEC 0x0091 + +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_RD 0x00A0 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WR 0x00A1 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_RD 0x00A2 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_REFILL_WR 0x00A3 + +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_VICTIM 0x00A6 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_WB_CLEAN 0x00A7 +#define ARMV8_IMPDEF_PERFCTR_L3D_CACHE_INVAL 0x00A8 + +/* + * Per-CPU PMCR: config reg + */ +#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */ +#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */ +#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */ +#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */ +#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */ +#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +#define ARMV8_PMU_PMCR_LC (1 << 6) /* Overflow on 64 bit cycle counter */ +#define ARMV8_PMU_PMCR_LP (1 << 7) /* Long event counter enable */ +#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */ +#define ARMV8_PMU_PMCR_N_MASK 0x1f +#define ARMV8_PMU_PMCR_MASK 0xff /* Mask for writable bits */ + +/* + * PMOVSR: counters overflow flag status reg + */ +#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */ +#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK + +/* + * PMXEVTYPER: Event selection reg + */ +#define ARMV8_PMU_EVTYPE_MASK 0xc800ffff /* Mask for writable bits */ +#define ARMV8_PMU_EVTYPE_EVENT 0xffff /* Mask for EVENT bits */ + +/* + * Event filters for PMUv3 + */ +#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31) +#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30) +#define ARMV8_PMU_INCLUDE_EL2 (1U << 27) + +/* + * PMUSERENR: user enable reg + */ +#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */ +#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */ +#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */ +#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */ +#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */ + +/* PMMIR_EL1.SLOTS mask */ +#define ARMV8_PMU_SLOTS_MASK 0xff + +#define ARMV8_PMU_BUS_SLOTS_SHIFT 8 +#define ARMV8_PMU_BUS_SLOTS_MASK 0xff +#define ARMV8_PMU_BUS_WIDTH_SHIFT 16 +#define ARMV8_PMU_BUS_WIDTH_MASK 0xf + +/* + * This code is really good + */ + +#define 
PMEVN_CASE(n, case_macro) \ + case n: case_macro(n); break + +#define PMEVN_SWITCH(x, case_macro) \ + do { \ + switch (x) { \ + PMEVN_CASE(0, case_macro); \ + PMEVN_CASE(1, case_macro); \ + PMEVN_CASE(2, case_macro); \ + PMEVN_CASE(3, case_macro); \ + PMEVN_CASE(4, case_macro); \ + PMEVN_CASE(5, case_macro); \ + PMEVN_CASE(6, case_macro); \ + PMEVN_CASE(7, case_macro); \ + PMEVN_CASE(8, case_macro); \ + PMEVN_CASE(9, case_macro); \ + PMEVN_CASE(10, case_macro); \ + PMEVN_CASE(11, case_macro); \ + PMEVN_CASE(12, case_macro); \ + PMEVN_CASE(13, case_macro); \ + PMEVN_CASE(14, case_macro); \ + PMEVN_CASE(15, case_macro); \ + PMEVN_CASE(16, case_macro); \ + PMEVN_CASE(17, case_macro); \ + PMEVN_CASE(18, case_macro); \ + PMEVN_CASE(19, case_macro); \ + PMEVN_CASE(20, case_macro); \ + PMEVN_CASE(21, case_macro); \ + PMEVN_CASE(22, case_macro); \ + PMEVN_CASE(23, case_macro); \ + PMEVN_CASE(24, case_macro); \ + PMEVN_CASE(25, case_macro); \ + PMEVN_CASE(26, case_macro); \ + PMEVN_CASE(27, case_macro); \ + PMEVN_CASE(28, case_macro); \ + PMEVN_CASE(29, case_macro); \ + PMEVN_CASE(30, case_macro); \ + default: \ + WARN(1, "Invalid PMEV* index\n"); \ + assert(0); \ + } \ + } while (0) + +#endif -- cgit v1.2.3 From 8d0aebe1ca2bd6f86ee2b4de30c8a4aafcf2908b Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:49 +0000 Subject: KVM: selftests: aarch64: Introduce vpmu_counter_access test Introduce vpmu_counter_access test for arm64 platforms. The test configures PMUv3 for a vCPU, sets PMCR_EL0.N for the vCPU, and check if the guest can consistently see the same number of the PMU event counters (PMCR_EL0.N) that userspace sets. This test case is done with each of the PMCR_EL0.N values from 0 to 31 (With the PMCR_EL0.N values greater than the host value, the test expects KVM_SET_ONE_REG for the PMCR_EL0 to fail). Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-10-rananta@google.com Signed-off-by: Oliver Upton --- tools/testing/selftests/kvm/Makefile | 1 + .../selftests/kvm/aarch64/vpmu_counter_access.c | 255 +++++++++++++++++++++ 2 files changed, 256 insertions(+) create mode 100644 tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index a3bb36fb3cfc..416700aa196c 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -149,6 +149,7 @@ TEST_GEN_PROGS_aarch64 += aarch64/smccc_filter TEST_GEN_PROGS_aarch64 += aarch64/vcpu_width_config TEST_GEN_PROGS_aarch64 += aarch64/vgic_init TEST_GEN_PROGS_aarch64 += aarch64/vgic_irq +TEST_GEN_PROGS_aarch64 += aarch64/vpmu_counter_access TEST_GEN_PROGS_aarch64 += access_tracking_perf_test TEST_GEN_PROGS_aarch64 += demand_paging_test TEST_GEN_PROGS_aarch64 += dirty_log_test diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c new file mode 100644 index 000000000000..4c6e1fe87e0e --- /dev/null +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * vpmu_counter_access - Test vPMU event counter access + * + * Copyright (c) 2023 Google LLC. + * + * This test checks if the guest can see the same number of the PMU event + * counters (PMCR_EL0.N) that userspace sets. + * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host. 
+ */ +#include <kvm_util.h> +#include <processor.h> +#include <test_util.h> +#include <vgic.h> +#include <perf/arm_pmuv3.h> +#include <linux/bitfield.h> + +/* The max number of the PMU event counters (excluding the cycle counter) */ +#define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1) + +struct vpmu_vm { + struct kvm_vm *vm; + struct kvm_vcpu *vcpu; + int gic_fd; +}; + +static struct vpmu_vm vpmu_vm; + +static uint64_t get_pmcr_n(uint64_t pmcr) +{ + return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; +} + +static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) +{ + *pmcr = *pmcr & ~(ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT); + *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); +} + +static void guest_sync_handler(struct ex_regs *regs) +{ + uint64_t esr, ec; + + esr = read_sysreg(esr_el1); + ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK; + __GUEST_ASSERT(0, "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx", regs->pc, esr, ec); +} + +/* + * The guest is configured with PMUv3 with @expected_pmcr_n number of + * event counters. + * Check if @expected_pmcr_n is consistent with PMCR_EL0.N. + */ +static void guest_code(uint64_t expected_pmcr_n) +{ + uint64_t pmcr, pmcr_n; + + __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, + "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx", + expected_pmcr_n, ARMV8_PMU_MAX_GENERAL_COUNTERS); + + pmcr = read_sysreg(pmcr_el0); + pmcr_n = get_pmcr_n(pmcr); + + /* Make sure that PMCR_EL0.N indicates the value userspace set */ + __GUEST_ASSERT(pmcr_n == expected_pmcr_n, + "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx", + expected_pmcr_n, pmcr_n); + + GUEST_DONE(); +} + +#define GICD_BASE_GPA 0x8000000ULL +#define GICR_BASE_GPA 0x80A0000ULL + +/* Create a VM that has one vCPU with PMUv3 configured. */ +static void create_vpmu_vm(void *guest_code) +{ + struct kvm_vcpu_init init; + uint8_t pmuver, ec; + uint64_t dfr0, irq = 23; + struct kvm_device_attr irq_attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_IRQ, + .addr = (uint64_t)&irq, + }; + struct kvm_device_attr init_attr = { + .group = KVM_ARM_VCPU_PMU_V3_CTRL, + .attr = KVM_ARM_VCPU_PMU_V3_INIT, + }; + + /* The test creates the vpmu_vm multiple times.
Ensure a clean state */ + memset(&vpmu_vm, 0, sizeof(vpmu_vm)); + + vpmu_vm.vm = vm_create(1); + vm_init_descriptor_tables(vpmu_vm.vm); + for (ec = 0; ec < ESR_EC_NUM; ec++) { + vm_install_sync_handler(vpmu_vm.vm, VECTOR_SYNC_CURRENT, ec, + guest_sync_handler); + } + + /* Create vCPU with PMUv3 */ + vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init); + init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); + vpmu_vm.vcpu = aarch64_vcpu_add(vpmu_vm.vm, 0, &init, guest_code); + vcpu_init_descriptor_tables(vpmu_vm.vcpu); + vpmu_vm.gic_fd = vgic_v3_setup(vpmu_vm.vm, 1, 64, + GICD_BASE_GPA, GICR_BASE_GPA); + __TEST_REQUIRE(vpmu_vm.gic_fd >= 0, + "Failed to create vgic-v3, skipping"); + + /* Make sure that PMUv3 support is indicated in the ID register */ + vcpu_get_reg(vpmu_vm.vcpu, + KVM_ARM64_SYS_REG(SYS_ID_AA64DFR0_EL1), &dfr0); + pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), dfr0); + TEST_ASSERT(pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && + pmuver >= ID_AA64DFR0_EL1_PMUVer_IMP, + "Unexpected PMUVER (0x%x) on the vCPU with PMUv3", pmuver); + + /* Initialize vPMU */ + vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &irq_attr); + vcpu_ioctl(vpmu_vm.vcpu, KVM_SET_DEVICE_ATTR, &init_attr); +} + +static void destroy_vpmu_vm(void) +{ + close(vpmu_vm.gic_fd); + kvm_vm_free(vpmu_vm.vm); +} + +static void run_vcpu(struct kvm_vcpu *vcpu, uint64_t pmcr_n) +{ + struct ucall uc; + + vcpu_args_set(vcpu, 1, pmcr_n); + vcpu_run(vcpu); + switch (get_ucall(vcpu, &uc)) { + case UCALL_ABORT: + REPORT_GUEST_ASSERT(uc); + break; + case UCALL_DONE: + break; + default: + TEST_FAIL("Unknown ucall %lu", uc.cmd); + break; + } +} + +static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail) +{ + struct kvm_vcpu *vcpu; + uint64_t pmcr, pmcr_orig; + + create_vpmu_vm(guest_code); + vcpu = vpmu_vm.vcpu; + + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr_orig); + pmcr = pmcr_orig; + + /* + * Setting a larger value of PMCR.N should not modify the field, and + * should return success. + */ + set_pmcr_n(&pmcr, pmcr_n); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), pmcr); + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr); + + if (expect_fail) + TEST_ASSERT(pmcr_orig == pmcr, + "PMCR.N modified by KVM to a larger value (PMCR: 0x%lx) for pmcr_n: 0x%lx\n", + pmcr, pmcr_n); + else + TEST_ASSERT(pmcr_n == get_pmcr_n(pmcr), + "Failed to update PMCR.N to %lu (received: %lu)\n", + pmcr_n, get_pmcr_n(pmcr)); +} + +/* + * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n, + * and run the test. + */ +static void run_test(uint64_t pmcr_n) +{ + uint64_t sp; + struct kvm_vcpu *vcpu; + struct kvm_vcpu_init init; + + pr_debug("Test with pmcr_n %lu\n", pmcr_n); + + test_create_vpmu_vm_with_pmcr_n(pmcr_n, false); + vcpu = vpmu_vm.vcpu; + + /* Save the initial sp to restore it later to run the guest again */ + vcpu_get_reg(vcpu, ARM64_CORE_REG(sp_el1), &sp); + + run_vcpu(vcpu, pmcr_n); + + /* + * Reset and re-initialize the vCPU, and run the guest code again to + * check if PMCR_EL0.N is preserved.
+ */ + vm_ioctl(vpmu_vm.vm, KVM_ARM_PREFERRED_TARGET, &init); + init.features[0] |= (1 << KVM_ARM_VCPU_PMU_V3); + aarch64_vcpu_setup(vcpu, &init); + vcpu_init_descriptor_tables(vcpu); + vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), sp); + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); + + run_vcpu(vcpu, pmcr_n); + + destroy_vpmu_vm(); +} + +/* + * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for + * the vCPU to @pmcr_n, which is larger than the host value. + * The attempt should fail as @pmcr_n is too big to set for the vCPU. + */ +static void run_error_test(uint64_t pmcr_n) +{ + pr_debug("Error test with pmcr_n %lu (larger than the host)\n", pmcr_n); + + test_create_vpmu_vm_with_pmcr_n(pmcr_n, true); + destroy_vpmu_vm(); +} + +/* + * Return the default number of implemented PMU event counters excluding + * the cycle counter (i.e. PMCR_EL0.N value) for the guest. + */ +static uint64_t get_pmcr_n_limit(void) +{ + uint64_t pmcr; + + create_vpmu_vm(guest_code); + vcpu_get_reg(vpmu_vm.vcpu, KVM_ARM64_SYS_REG(SYS_PMCR_EL0), &pmcr); + destroy_vpmu_vm(); + return get_pmcr_n(pmcr); +} + +int main(void) +{ + uint64_t i, pmcr_n; + + TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3)); + + pmcr_n = get_pmcr_n_limit(); + for (i = 0; i <= pmcr_n; i++) + run_test(i); + + for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++) + run_error_test(i); + + return 0; +} -- cgit v1.2.3 From ada1ae68262deea2685ccd38136ea2db233dfc4c Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:50 +0000 Subject: KVM: selftests: aarch64: vPMU register test for implemented counters Add a new test case to the vpmu_counter_access test to check if PMU registers or their bits for implemented counters on the vCPU are readable/writable as expected, and can be programmed to count events. Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-11-rananta@google.com Signed-off-by: Oliver Upton --- .../selftests/kvm/aarch64/vpmu_counter_access.c | 270 ++++++++++++++++++++- 1 file changed, 266 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index 4c6e1fe87e0e..a579286b6f11 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -5,7 +5,8 @@ * Copyright (c) 2023 Google LLC. * * This test checks if the guest can see the same number of the PMU event - * counters (PMCR_EL0.N) that userspace sets. + * counters (PMCR_EL0.N) that userspace sets, and if the guest can access + * those counters. * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host. 
*/ #include @@ -37,6 +38,255 @@ static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); } +/* Read PMEVTCNTR_EL0 through PMXEVCNTR_EL0 */ +static inline unsigned long read_sel_evcntr(int sel) +{ + write_sysreg(sel, pmselr_el0); + isb(); + return read_sysreg(pmxevcntr_el0); +} + +/* Write PMEVTCNTR_EL0 through PMXEVCNTR_EL0 */ +static inline void write_sel_evcntr(int sel, unsigned long val) +{ + write_sysreg(sel, pmselr_el0); + isb(); + write_sysreg(val, pmxevcntr_el0); + isb(); +} + +/* Read PMEVTYPER_EL0 through PMXEVTYPER_EL0 */ +static inline unsigned long read_sel_evtyper(int sel) +{ + write_sysreg(sel, pmselr_el0); + isb(); + return read_sysreg(pmxevtyper_el0); +} + +/* Write PMEVTYPER_EL0 through PMXEVTYPER_EL0 */ +static inline void write_sel_evtyper(int sel, unsigned long val) +{ + write_sysreg(sel, pmselr_el0); + isb(); + write_sysreg(val, pmxevtyper_el0); + isb(); +} + +static inline void enable_counter(int idx) +{ + uint64_t v = read_sysreg(pmcntenset_el0); + + write_sysreg(BIT(idx) | v, pmcntenset_el0); + isb(); +} + +static inline void disable_counter(int idx) +{ + uint64_t v = read_sysreg(pmcntenset_el0); + + write_sysreg(BIT(idx) | v, pmcntenclr_el0); + isb(); +} + +static void pmu_disable_reset(void) +{ + uint64_t pmcr = read_sysreg(pmcr_el0); + + /* Reset all counters, disabling them */ + pmcr &= ~ARMV8_PMU_PMCR_E; + write_sysreg(pmcr | ARMV8_PMU_PMCR_P, pmcr_el0); + isb(); +} + +#define RETURN_READ_PMEVCNTRN(n) \ + return read_sysreg(pmevcntr##n##_el0) +static unsigned long read_pmevcntrn(int n) +{ + PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN); + return 0; +} + +#define WRITE_PMEVCNTRN(n) \ + write_sysreg(val, pmevcntr##n##_el0) +static void write_pmevcntrn(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVCNTRN); + isb(); +} + +#define READ_PMEVTYPERN(n) \ + return read_sysreg(pmevtyper##n##_el0) +static unsigned long read_pmevtypern(int n) +{ + PMEVN_SWITCH(n, READ_PMEVTYPERN); + return 0; +} + +#define WRITE_PMEVTYPERN(n) \ + write_sysreg(val, pmevtyper##n##_el0) +static void write_pmevtypern(int n, unsigned long val) +{ + PMEVN_SWITCH(n, WRITE_PMEVTYPERN); + isb(); +} + +/* + * The pmc_accessor structure has pointers to PMEV{CNTR,TYPER}_EL0 + * accessors that test cases will use. Each of the accessors will + * either directly reads/writes PMEV{CNTR,TYPER}_EL0 + * (i.e. {read,write}_pmev{cnt,type}rn()), or reads/writes them through + * PMXEV{CNTR,TYPER}_EL0 (i.e. {read,write}_sel_ev{cnt,type}r()). + * + * This is used to test that combinations of those accessors provide + * the consistent behavior. 
+ */ +struct pmc_accessor { + /* A function to be used to read PMEVTCNTR_EL0 */ + unsigned long (*read_cntr)(int idx); + /* A function to be used to write PMEVTCNTR_EL0 */ + void (*write_cntr)(int idx, unsigned long val); + /* A function to be used to read PMEVTYPER_EL0 */ + unsigned long (*read_typer)(int idx); + /* A function to be used to write PMEVTYPER_EL0 */ + void (*write_typer)(int idx, unsigned long val); +}; + +struct pmc_accessor pmc_accessors[] = { + /* test with all direct accesses */ + { read_pmevcntrn, write_pmevcntrn, read_pmevtypern, write_pmevtypern }, + /* test with all indirect accesses */ + { read_sel_evcntr, write_sel_evcntr, read_sel_evtyper, write_sel_evtyper }, + /* read with direct accesses, and write with indirect accesses */ + { read_pmevcntrn, write_sel_evcntr, read_pmevtypern, write_sel_evtyper }, + /* read with indirect accesses, and write with direct accesses */ + { read_sel_evcntr, write_pmevcntrn, read_sel_evtyper, write_pmevtypern }, +}; + +/* + * Convert a pointer of pmc_accessor to an index in pmc_accessors[], + * assuming that the pointer is one of the entries in pmc_accessors[]. + */ +#define PMC_ACC_TO_IDX(acc) (acc - &pmc_accessors[0]) + +#define GUEST_ASSERT_BITMAP_REG(regname, mask, set_expected) \ +{ \ + uint64_t _tval = read_sysreg(regname); \ + \ + if (set_expected) \ + __GUEST_ASSERT((_tval & mask), \ + "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \ + _tval, mask, set_expected); \ + else \ + __GUEST_ASSERT(!(_tval & mask), \ + "tval: 0x%lx; mask: 0x%lx; set_expected: 0x%lx", \ + _tval, mask, set_expected); \ +} + +/* + * Check if @mask bits in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers + * are set or cleared as specified in @set_expected. + */ +static void check_bitmap_pmu_regs(uint64_t mask, bool set_expected) +{ + GUEST_ASSERT_BITMAP_REG(pmcntenset_el0, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmcntenclr_el0, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmintenset_el1, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmintenclr_el1, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmovsset_el0, mask, set_expected); + GUEST_ASSERT_BITMAP_REG(pmovsclr_el0, mask, set_expected); +} + +/* + * Check if the bit in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers corresponding + * to the specified counter (@pmc_idx) can be read/written as expected. + * When @set_op is true, it tries to set the bit for the counter in + * those registers by writing the SET registers (the bit won't be set + * if the counter is not implemented though). + * Otherwise, it tries to clear the bits in the registers by writing + * the CLR registers. + * Then, it checks if the values indicated in the registers are as expected. + */ +static void test_bitmap_pmu_regs(int pmc_idx, bool set_op) +{ + uint64_t pmcr_n, test_bit = BIT(pmc_idx); + bool set_expected = false; + + if (set_op) { + write_sysreg(test_bit, pmcntenset_el0); + write_sysreg(test_bit, pmintenset_el1); + write_sysreg(test_bit, pmovsset_el0); + + /* The bit will be set only if the counter is implemented */ + pmcr_n = get_pmcr_n(read_sysreg(pmcr_el0)); + set_expected = (pmc_idx < pmcr_n) ? true : false; + } else { + write_sysreg(test_bit, pmcntenclr_el0); + write_sysreg(test_bit, pmintenclr_el1); + write_sysreg(test_bit, pmovsclr_el0); + } + check_bitmap_pmu_regs(test_bit, set_expected); +} + +/* + * Tests for reading/writing registers for the (implemented) event counter + * specified by @pmc_idx. 
+ */ +static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) +{ + uint64_t write_data, read_data; + + /* Disable all PMCs and reset all PMCs to zero. */ + pmu_disable_reset(); + + /* + * Tests for reading/writing {PMCNTEN,PMINTEN,PMOVS}{SET,CLR}_EL1. + */ + + /* Make sure that the bit in those registers are set to 0 */ + test_bitmap_pmu_regs(pmc_idx, false); + /* Test if setting the bit in those registers works */ + test_bitmap_pmu_regs(pmc_idx, true); + /* Test if clearing the bit in those registers works */ + test_bitmap_pmu_regs(pmc_idx, false); + + /* + * Tests for reading/writing the event type register. + */ + + /* + * Set the event type register to an arbitrary value just for testing + * of reading/writing the register. + * Arm ARM says that for the event from 0x0000 to 0x003F, + * the value indicated in the PMEVTYPER_EL0.evtCount field is + * the value written to the field even when the specified event + * is not supported. + */ + write_data = (ARMV8_PMU_EXCLUDE_EL1 | ARMV8_PMUV3_PERFCTR_INST_RETIRED); + acc->write_typer(pmc_idx, write_data); + read_data = acc->read_typer(pmc_idx); + __GUEST_ASSERT(read_data == write_data, + "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx", + pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data); + + /* + * Tests for reading/writing the event count register. + */ + + read_data = acc->read_cntr(pmc_idx); + + /* The count value must be 0, as it is disabled and reset */ + __GUEST_ASSERT(read_data == 0, + "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx", + pmc_idx, PMC_ACC_TO_IDX(acc), read_data); + + write_data = read_data + pmc_idx + 0x12345; + acc->write_cntr(pmc_idx, write_data); + read_data = acc->read_cntr(pmc_idx); + __GUEST_ASSERT(read_data == write_data, + "pmc_idx: 0x%lx; acc_idx: 0x%lx; read_data: 0x%lx; write_data: 0x%lx", + pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data); +} + static void guest_sync_handler(struct ex_regs *regs) { uint64_t esr, ec; @@ -49,11 +299,14 @@ static void guest_sync_handler(struct ex_regs *regs) /* * The guest is configured with PMUv3 with @expected_pmcr_n number of * event counters. - * Check if @expected_pmcr_n is consistent with PMCR_EL0.N. + * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and + * if reading/writing PMU registers for implemented counters works + * as expected. */ static void guest_code(uint64_t expected_pmcr_n) { uint64_t pmcr, pmcr_n; + int i, pmc; __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, "Expected PMCR.N: 0x%lx; ARMv8 general counters: 0x%lx", @@ -67,6 +320,15 @@ static void guest_code(uint64_t expected_pmcr_n) "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx", expected_pmcr_n, pmcr_n); + /* + * Tests for reading/writing PMU registers for implemented counters. + * Use each combination of PMEVT{CNTR,TYPER}_EL0 accessor functions. + */ + for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) { + for (pmc = 0; pmc < pmcr_n; pmc++) + test_access_pmc_regs(&pmc_accessors[i], pmc); + } + GUEST_DONE(); } @@ -179,7 +441,7 @@ static void test_create_vpmu_vm_with_pmcr_n(uint64_t pmcr_n, bool expect_fail) * Create a guest with one vCPU, set the PMCR_EL0.N for the vCPU to @pmcr_n, * and run the test. 
*/ -static void run_test(uint64_t pmcr_n) +static void run_access_test(uint64_t pmcr_n) { uint64_t sp; struct kvm_vcpu *vcpu; @@ -246,7 +508,7 @@ int main(void) pmcr_n = get_pmcr_n_limit(); for (i = 0; i <= pmcr_n; i++) - run_test(i); + run_access_test(i); for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++) run_error_test(i); -- cgit v1.2.3 From e1cc87206348f36fb78e417b5813f78f672a4aef Mon Sep 17 00:00:00 2001 From: Reiji Watanabe Date: Fri, 20 Oct 2023 21:40:51 +0000 Subject: KVM: selftests: aarch64: vPMU register test for unimplemented counters Add a new test case to the vpmu_counter_access test to check if PMU registers or their bits for unimplemented counters are not accessible or are RAZ, as expected. Signed-off-by: Reiji Watanabe Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-12-rananta@google.com [Oliver: fix issues relating to exception return address] Signed-off-by: Oliver Upton --- .../selftests/kvm/aarch64/vpmu_counter_access.c | 82 ++++++++++++++++++++-- .../selftests/kvm/include/aarch64/processor.h | 1 + 2 files changed, 76 insertions(+), 7 deletions(-) diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index a579286b6f11..8694e89684bf 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -5,8 +5,9 @@ * Copyright (c) 2023 Google LLC. * * This test checks if the guest can see the same number of the PMU event - * counters (PMCR_EL0.N) that userspace sets, and if the guest can access - * those counters. + * counters (PMCR_EL0.N) that userspace sets, if the guest can access + * those counters, and if the guest is prevented from accessing any + * other counters. * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host. */ #include <kvm_util.h> @@ -287,25 +288,74 @@ static void test_access_pmc_regs(struct pmc_accessor *acc, int pmc_idx) pmc_idx, PMC_ACC_TO_IDX(acc), read_data, write_data); } +#define INVALID_EC (-1ul) +uint64_t expected_ec = INVALID_EC; + static void guest_sync_handler(struct ex_regs *regs) { uint64_t esr, ec; esr = read_sysreg(esr_el1); ec = (esr >> ESR_EC_SHIFT) & ESR_EC_MASK; - __GUEST_ASSERT(0, "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx", regs->pc, esr, ec); + + __GUEST_ASSERT(expected_ec == ec, + "PC: 0x%lx; ESR: 0x%lx; EC: 0x%lx; EC expected: 0x%lx", + regs->pc, esr, ec, expected_ec); + + /* skip the trapping instruction */ + regs->pc += 4; + + /* Use INVALID_EC to indicate an exception occurred */ + expected_ec = INVALID_EC; +} + +/* + * Run the given operation that should trigger an exception with the + * given exception class. The exception handler (guest_sync_handler) + * will reset expected_ec to INVALID_EC, and skip + * the instruction that trapped. + */ +#define TEST_EXCEPTION(ec, ops) \ +({ \ + GUEST_ASSERT(ec != INVALID_EC); \ + WRITE_ONCE(expected_ec, ec); \ + dsb(ish); \ + ops; \ + GUEST_ASSERT(expected_ec == INVALID_EC); \ +}) + +/* + * Tests for reading/writing registers for the unimplemented event counter + * specified by @pmc_idx (>= PMCR_EL0.N). + */ +static void test_access_invalid_pmc_regs(struct pmc_accessor *acc, int pmc_idx) +{ + /* + * Reading/writing the event count/type registers should cause + * an UNDEFINED exception.
+ */ + TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_cntr(pmc_idx)); + TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_cntr(pmc_idx, 0)); + TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->read_typer(pmc_idx)); + TEST_EXCEPTION(ESR_EC_UNKNOWN, acc->write_typer(pmc_idx, 0)); + /* + * The bit corresponding to the (unimplemented) counter in + * {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers should be RAZ. + */ + test_bitmap_pmu_regs(pmc_idx, 1); + test_bitmap_pmu_regs(pmc_idx, 0); } /* * The guest is configured with PMUv3 with @expected_pmcr_n number of * event counters. * Check if @expected_pmcr_n is consistent with PMCR_EL0.N, and - * if reading/writing PMU registers for implemented counters works - * as expected. + * if reading/writing PMU registers for implemented or unimplemented + * counters works as expected. */ static void guest_code(uint64_t expected_pmcr_n) { - uint64_t pmcr, pmcr_n; + uint64_t pmcr, pmcr_n, unimp_mask; int i, pmc; __GUEST_ASSERT(expected_pmcr_n <= ARMV8_PMU_MAX_GENERAL_COUNTERS, @@ -320,15 +370,33 @@ static void guest_code(uint64_t expected_pmcr_n) "Expected PMCR.N: 0x%lx, PMCR.N: 0x%lx", expected_pmcr_n, pmcr_n); + /* + * Make sure that (RAZ) bits corresponding to unimplemented event + * counters in {PMCNTEN,PMINTEN,PMOVS}{SET,CLR} registers are reset + * to zero. + * (NOTE: bits for implemented event counters are reset to UNKNOWN) + */ + unimp_mask = GENMASK_ULL(ARMV8_PMU_MAX_GENERAL_COUNTERS - 1, pmcr_n); + check_bitmap_pmu_regs(unimp_mask, false); + /* * Tests for reading/writing PMU registers for implemented counters. - * Use each combination of PMEVT{CNTR,TYPER}_EL0 accessor functions. + * Use each combination of PMEV{CNTR,TYPER}_EL0 accessor functions. */ for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) { for (pmc = 0; pmc < pmcr_n; pmc++) test_access_pmc_regs(&pmc_accessors[i], pmc); } + /* + * Tests for reading/writing PMU registers for unimplemented counters. + * Use each combination of PMEV{CNTR,TYPER}_EL0 accessor functions. + */ + for (i = 0; i < ARRAY_SIZE(pmc_accessors); i++) { + for (pmc = pmcr_n; pmc < ARMV8_PMU_MAX_GENERAL_COUNTERS; pmc++) + test_access_invalid_pmc_regs(&pmc_accessors[i], pmc); + } + GUEST_DONE(); } diff --git a/tools/testing/selftests/kvm/include/aarch64/processor.h b/tools/testing/selftests/kvm/include/aarch64/processor.h index cb537253a6b9..c42d683102c7 100644 --- a/tools/testing/selftests/kvm/include/aarch64/processor.h +++ b/tools/testing/selftests/kvm/include/aarch64/processor.h @@ -104,6 +104,7 @@ enum { #define ESR_EC_SHIFT 26 #define ESR_EC_MASK (ESR_EC_NUM - 1) +#define ESR_EC_UNKNOWN 0x0 #define ESR_EC_SVC64 0x15 #define ESR_EC_IABT 0x21 #define ESR_EC_DABT 0x25 -- cgit v1.2.3 From 62708be351fe7b06be6f6fd30e95c94095bf21d3 Mon Sep 17 00:00:00 2001 From: Raghavendra Rao Ananta Date: Fri, 20 Oct 2023 21:40:52 +0000 Subject: KVM: selftests: aarch64: vPMU test for validating user accesses Add a vPMU test scenario to validate the userspace accesses for the registers PM{C,I}NTEN{SET,CLR} and PMOVS{SET,CLR} to ensure that KVM honors the architectural definitions of these registers for a given PMCR.N. 
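To make the expected semantics concrete: each SET/CLR pair is two views of one backing value, where a write to the SET register ORs bits in, a write to the CLR register clears them, and bits for unimplemented counters never latch. A small standalone model of that behavior (an assumption-level sketch, not the KVM implementation):

#include <assert.h>
#include <stdint.h>

struct setclr { uint64_t val; };	/* one backing value for the pair */

static void set_write(struct setclr *r, uint64_t v, uint64_t impl)
{
	r->val |= v & impl;	/* unimplemented bits are RAZ/WI */
}

static void clr_write(struct setclr *r, uint64_t v, uint64_t impl)
{
	r->val &= ~(v & impl);
}

int main(void)
{
	struct setclr r = { 0 };
	uint64_t impl = 0x8000003fULL;	/* N = 6 plus the cycle counter */

	set_write(&r, ~0ULL, impl);
	assert(r.val == impl);		/* bits above N never stick */
	clr_write(&r, 1ULL, impl);
	assert(r.val == (impl & ~1ULL));
	return 0;
}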
Signed-off-by: Raghavendra Rao Ananta Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231020214053.2144305-13-rananta@google.com Signed-off-by: Oliver Upton --- .../selftests/kvm/aarch64/vpmu_counter_access.c | 87 +++++++++++++++++++++- 1 file changed, 86 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c index 8694e89684bf..5ea78986e665 100644 --- a/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c +++ b/tools/testing/selftests/kvm/aarch64/vpmu_counter_access.c @@ -8,6 +8,8 @@ * counters (PMCR_EL0.N) that userspace sets, if the guest can access * those counters, and if the guest is prevented from accessing any * other counters. + * It also checks if the userspace accesses to the PMU registers honor the + * PMCR.N value that's set for the guest. * This test runs only when KVM_CAP_ARM_PMU_V3 is supported on the host. */ #include <kvm_util.h> @@ -20,6 +22,9 @@ /* The max number of the PMU event counters (excluding the cycle counter) */ #define ARMV8_PMU_MAX_GENERAL_COUNTERS (ARMV8_PMU_MAX_COUNTERS - 1) +/* The cycle counter bit position that's common among the PMU registers */ +#define ARMV8_PMU_CYCLE_IDX 31 + struct vpmu_vm { struct kvm_vm *vm; struct kvm_vcpu *vcpu; @@ -28,6 +33,13 @@ struct vpmu_vm { static struct vpmu_vm vpmu_vm; +struct pmreg_sets { + uint64_t set_reg_id; + uint64_t clr_reg_id; +}; + +#define PMREG_SET(set, clr) {.set_reg_id = set, .clr_reg_id = clr} + static uint64_t get_pmcr_n(uint64_t pmcr) { return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK; @@ -39,6 +51,15 @@ static void set_pmcr_n(uint64_t *pmcr, uint64_t pmcr_n) *pmcr |= (pmcr_n << ARMV8_PMU_PMCR_N_SHIFT); } +static uint64_t get_counters_mask(uint64_t n) +{ + uint64_t mask = BIT(ARMV8_PMU_CYCLE_IDX); + + if (n) + mask |= GENMASK(n - 1, 0); + return mask; +} + /* Read PMEVTCNTR_EL0 through PMXEVCNTR_EL0 */ static inline unsigned long read_sel_evcntr(int sel) { @@ -541,6 +562,68 @@ static void run_access_test(uint64_t pmcr_n) destroy_vpmu_vm(); } +static struct pmreg_sets validity_check_reg_sets[] = { + PMREG_SET(SYS_PMCNTENSET_EL0, SYS_PMCNTENCLR_EL0), + PMREG_SET(SYS_PMINTENSET_EL1, SYS_PMINTENCLR_EL1), + PMREG_SET(SYS_PMOVSSET_EL0, SYS_PMOVSCLR_EL0), +}; + +/* + * Create a VM, and check if KVM handles the userspace accesses of + * the PMU register sets in @validity_check_reg_sets[] correctly. + */ +static void run_pmregs_validity_test(uint64_t pmcr_n) +{ + int i; + struct kvm_vcpu *vcpu; + uint64_t set_reg_id, clr_reg_id, reg_val; + uint64_t valid_counters_mask, max_counters_mask; + + test_create_vpmu_vm_with_pmcr_n(pmcr_n, false); + vcpu = vpmu_vm.vcpu; + + valid_counters_mask = get_counters_mask(pmcr_n); + max_counters_mask = get_counters_mask(ARMV8_PMU_MAX_COUNTERS); + + for (i = 0; i < ARRAY_SIZE(validity_check_reg_sets); i++) { + set_reg_id = validity_check_reg_sets[i].set_reg_id; + clr_reg_id = validity_check_reg_sets[i].clr_reg_id; + + /* + * Test if the 'set' and 'clr' variants of the registers + * are initialized based on the number of valid counters.
+ */ + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), ®_val); + TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0, + "Initial read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n", + KVM_ARM64_SYS_REG(set_reg_id), reg_val); + + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), ®_val); + TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0, + "Initial read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n", + KVM_ARM64_SYS_REG(clr_reg_id), reg_val); + + /* + * Using the 'set' variant, force-set the register to the + * max number of possible counters and test if KVM discards + * the bits for unimplemented counters as it should. + */ + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), max_counters_mask); + + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(set_reg_id), ®_val); + TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0, + "Read of set_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n", + KVM_ARM64_SYS_REG(set_reg_id), reg_val); + + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(clr_reg_id), ®_val); + TEST_ASSERT((reg_val & (~valid_counters_mask)) == 0, + "Read of clr_reg: 0x%llx has unimplemented counters enabled: 0x%lx\n", + KVM_ARM64_SYS_REG(clr_reg_id), reg_val); + } + + destroy_vpmu_vm(); +} + /* * Create a guest with one vCPU, and attempt to set the PMCR_EL0.N for * the vCPU to @pmcr_n, which is larger than the host value. @@ -575,8 +658,10 @@ int main(void) TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_PMU_V3)); pmcr_n = get_pmcr_n_limit(); - for (i = 0; i <= pmcr_n; i++) + for (i = 0; i <= pmcr_n; i++) { run_access_test(i); + run_pmregs_validity_test(i); + } for (i = pmcr_n + 1; i < ARMV8_PMU_MAX_COUNTERS; i++) run_error_test(i); -- cgit v1.2.3 From d5cb781b77416a9ed4129960712932dbcf8a9f30 Mon Sep 17 00:00:00 2001 From: Miguel Luis Date: Mon, 23 Oct 2023 10:54:40 +0100 Subject: arm64: Add missing _EL12 encodings Some _EL12 encodings are missing. Add them. 
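For readers decoding these tables: sys_reg(op0, op1, CRn, CRm, op2) packs the five MRS/MSR operands into a single index. A rough user-space equivalent is sketched below, assuming the shift values (19/16/12/8/5) used by the kernel's asm/sysreg.h; verify against your tree before relying on them:

#include <stdint.h>
#include <stdio.h>

/* Pack op0/op1/CRn/CRm/op2 the way sys_reg() does (shifts assumed). */
#define SYS_REG(op0, op1, crn, crm, op2)			\
	(((uint32_t)(op0) << 19) | ((uint32_t)(op1) << 16) |	\
	 ((uint32_t)(crn) << 12) | ((uint32_t)(crm) << 8) |	\
	 ((uint32_t)(op2) << 5))

int main(void)
{
	/* SYS_CPACR_EL12 is sys_reg(3, 5, 1, 0, 2) in the diff below */
	printf("SYS_CPACR_EL12 = 0x%x\n", SYS_REG(3, 5, 1, 0, 2));
	return 0;
}

Note that all the _EL12 encodings in the diff share op0=3, op1=5: they are the VHE aliases of the corresponding EL1 registers, distinguished only by op1.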
Reviewed-by: Eric Auger Signed-off-by: Miguel Luis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231023095444.1587322-2-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/sysreg.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 38296579a4fd..ba5db50effec 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -567,19 +567,30 @@ #define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0) /* VHE encodings for architectural EL0/1 system registers */ +#define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0) #define SYS_SCTLR_EL12 sys_reg(3, 5, 1, 0, 0) +#define SYS_CPACR_EL12 sys_reg(3, 5, 1, 0, 2) +#define SYS_SCTLR2_EL12 sys_reg(3, 5, 1, 0, 3) +#define SYS_ZCR_EL12 sys_reg(3, 5, 1, 2, 0) +#define SYS_TRFCR_EL12 sys_reg(3, 5, 1, 2, 1) +#define SYS_SMCR_EL12 sys_reg(3, 5, 1, 2, 6) #define SYS_TTBR0_EL12 sys_reg(3, 5, 2, 0, 0) #define SYS_TTBR1_EL12 sys_reg(3, 5, 2, 0, 1) #define SYS_TCR_EL12 sys_reg(3, 5, 2, 0, 2) +#define SYS_TCR2_EL12 sys_reg(3, 5, 2, 0, 3) #define SYS_SPSR_EL12 sys_reg(3, 5, 4, 0, 0) #define SYS_ELR_EL12 sys_reg(3, 5, 4, 0, 1) #define SYS_AFSR0_EL12 sys_reg(3, 5, 5, 1, 0) #define SYS_AFSR1_EL12 sys_reg(3, 5, 5, 1, 1) #define SYS_ESR_EL12 sys_reg(3, 5, 5, 2, 0) #define SYS_TFSR_EL12 sys_reg(3, 5, 5, 6, 0) +#define SYS_FAR_EL12 sys_reg(3, 5, 6, 0, 0) +#define SYS_PMSCR_EL12 sys_reg(3, 5, 9, 9, 0) #define SYS_MAIR_EL12 sys_reg(3, 5, 10, 2, 0) #define SYS_AMAIR_EL12 sys_reg(3, 5, 10, 3, 0) #define SYS_VBAR_EL12 sys_reg(3, 5, 12, 0, 0) +#define SYS_CONTEXTIDR_EL12 sys_reg(3, 5, 13, 0, 1) +#define SYS_SCXTNUM_EL12 sys_reg(3, 5, 13, 0, 7) #define SYS_CNTKCTL_EL12 sys_reg(3, 5, 14, 1, 0) #define SYS_CNTP_TVAL_EL02 sys_reg(3, 5, 14, 2, 0) #define SYS_CNTP_CTL_EL02 sys_reg(3, 5, 14, 2, 1) -- cgit v1.2.3 From 41f6c9344713e6b895a77f2bdd596cf56d4cc197 Mon Sep 17 00:00:00 2001 From: Miguel Luis Date: Mon, 23 Oct 2023 10:54:41 +0100 Subject: arm64: Add missing _EL2 encodings Some _EL2 encodings are missing. Add them. 
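One non-obvious trick in the diff below is how SYS_AMEVCNTVOFF{0,1}n_EL2(m) fits sixteen counters into the encoding space: op2 carries m[2:0] and the low bit of CRm carries m[3]. A small stand-alone sketch of that split (illustration only, mirroring __AMEV_CRm()/__AMEV_op2()):

#include <stdio.h>

/* op2 holds m[2:0]; bit 3 of m selects between two adjacent CRm values,
 * so counters 0..7 land in the base CRm and 8..15 in base CRm + 1. */
static void amev_encode(unsigned int base_crm, unsigned int m)
{
	unsigned int crm = base_crm | ((m & 0x8) >> 3);
	unsigned int op2 = m & 0x7;

	printf("AMEVCNTVOFF0%u_EL2 -> s3_4_c13_c%u_%u\n", m, crm, op2);
}

int main(void)
{
	unsigned int m;

	for (m = 0; m < 16; m++)
		amev_encode(0x8, m);	/* group 0 uses CRm base 0x8 */
	return 0;
}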
Signed-off-by: Miguel Luis Reviewed-by: Eric Auger [maz: dropped secure encodings] Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231023095444.1587322-3-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/sysreg.h | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index ba5db50effec..4a20a7dc5bc4 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -270,6 +270,8 @@ /* ETM */ #define SYS_TRCOSLAR sys_reg(2, 1, 1, 0, 4) +#define SYS_BRBCR_EL2 sys_reg(2, 4, 9, 0, 0) + #define SYS_MIDR_EL1 sys_reg(3, 0, 0, 0, 0) #define SYS_MPIDR_EL1 sys_reg(3, 0, 0, 0, 5) #define SYS_REVIDR_EL1 sys_reg(3, 0, 0, 0, 6) @@ -484,6 +486,7 @@ #define SYS_SCTLR_EL2 sys_reg(3, 4, 1, 0, 0) #define SYS_ACTLR_EL2 sys_reg(3, 4, 1, 0, 1) +#define SYS_SCTLR2_EL2 sys_reg(3, 4, 1, 0, 3) #define SYS_HCR_EL2 sys_reg(3, 4, 1, 1, 0) #define SYS_MDCR_EL2 sys_reg(3, 4, 1, 1, 1) #define SYS_CPTR_EL2 sys_reg(3, 4, 1, 1, 2) @@ -497,6 +500,7 @@ #define SYS_VTCR_EL2 sys_reg(3, 4, 2, 1, 2) #define SYS_TRFCR_EL2 sys_reg(3, 4, 1, 2, 1) +#define SYS_VNCR_EL2 sys_reg(3, 4, 2, 2, 0) #define SYS_HAFGRTR_EL2 sys_reg(3, 4, 3, 1, 6) #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) @@ -514,6 +518,18 @@ #define SYS_MAIR_EL2 sys_reg(3, 4, 10, 2, 0) #define SYS_AMAIR_EL2 sys_reg(3, 4, 10, 3, 0) +#define SYS_MPAMHCR_EL2 sys_reg(3, 4, 10, 4, 0) +#define SYS_MPAMVPMV_EL2 sys_reg(3, 4, 10, 4, 1) +#define SYS_MPAM2_EL2 sys_reg(3, 4, 10, 5, 0) +#define __SYS__MPAMVPMx_EL2(x) sys_reg(3, 4, 10, 6, x) +#define SYS_MPAMVPM0_EL2 __SYS__MPAMVPMx_EL2(0) +#define SYS_MPAMVPM1_EL2 __SYS__MPAMVPMx_EL2(1) +#define SYS_MPAMVPM2_EL2 __SYS__MPAMVPMx_EL2(2) +#define SYS_MPAMVPM3_EL2 __SYS__MPAMVPMx_EL2(3) +#define SYS_MPAMVPM4_EL2 __SYS__MPAMVPMx_EL2(4) +#define SYS_MPAMVPM5_EL2 __SYS__MPAMVPMx_EL2(5) +#define SYS_MPAMVPM6_EL2 __SYS__MPAMVPMx_EL2(6) +#define SYS_MPAMVPM7_EL2 __SYS__MPAMVPMx_EL2(7) #define SYS_VBAR_EL2 sys_reg(3, 4, 12, 0, 0) #define SYS_RVBAR_EL2 sys_reg(3, 4, 12, 0, 1) @@ -562,9 +578,23 @@ #define SYS_CONTEXTIDR_EL2 sys_reg(3, 4, 13, 0, 1) #define SYS_TPIDR_EL2 sys_reg(3, 4, 13, 0, 2) +#define SYS_SCXTNUM_EL2 sys_reg(3, 4, 13, 0, 7) + +#define __AMEV_op2(m) (m & 0x7) +#define __AMEV_CRm(n, m) (n | ((m & 0x8) >> 3)) +#define __SYS__AMEVCNTVOFF0n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0x8, m), __AMEV_op2(m)) +#define SYS_AMEVCNTVOFF0n_EL2(m) __SYS__AMEVCNTVOFF0n_EL2(m) +#define __SYS__AMEVCNTVOFF1n_EL2(m) sys_reg(3, 4, 13, __AMEV_CRm(0xA, m), __AMEV_op2(m)) +#define SYS_AMEVCNTVOFF1n_EL2(m) __SYS__AMEVCNTVOFF1n_EL2(m) #define SYS_CNTVOFF_EL2 sys_reg(3, 4, 14, 0, 3) #define SYS_CNTHCTL_EL2 sys_reg(3, 4, 14, 1, 0) +#define SYS_CNTHP_TVAL_EL2 sys_reg(3, 4, 14, 2, 0) +#define SYS_CNTHP_CTL_EL2 sys_reg(3, 4, 14, 2, 1) +#define SYS_CNTHP_CVAL_EL2 sys_reg(3, 4, 14, 2, 2) +#define SYS_CNTHV_TVAL_EL2 sys_reg(3, 4, 14, 3, 0) +#define SYS_CNTHV_CTL_EL2 sys_reg(3, 4, 14, 3, 1) +#define SYS_CNTHV_CVAL_EL2 sys_reg(3, 4, 14, 3, 2) /* VHE encodings for architectural EL0/1 system registers */ #define SYS_BRBCR_EL12 sys_reg(2, 5, 9, 0, 0) -- cgit v1.2.3 From 04cf5465055442c15c37bbedb1febe2d8f3a47be Mon Sep 17 00:00:00 2001 From: Miguel Luis Date: Mon, 23 Oct 2023 10:54:42 +0100 Subject: KVM: arm64: Refine _EL2 system register list that require trap reinjection Implement a fine grained approach in the _EL2 sysreg range instead of the current wide cast trap. 
This ensures that we don't mistakenly inject the wrong exception into the guest. [maz: commit message massaging, dropped secure and AArch32 registers from the list] Fixes: d0fc0a2519a6 ("KVM: arm64: nv: Add trap forwarding for HCR_EL2") Reviewed-by: Eric Auger Signed-off-by: Miguel Luis Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231023095444.1587322-4-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/emulate-nested.c | 77 +++++++++++++++++++++++++++++++++++++---- 1 file changed, 71 insertions(+), 6 deletions(-) diff --git a/arch/arm64/kvm/emulate-nested.c b/arch/arm64/kvm/emulate-nested.c index 9ced1bf0c2b7..91cc851a6c75 100644 --- a/arch/arm64/kvm/emulate-nested.c +++ b/arch/arm64/kvm/emulate-nested.c @@ -648,15 +648,80 @@ static const struct encoding_to_trap_config encoding_to_cgt[] __initconst = { SR_TRAP(SYS_APGAKEYLO_EL1, CGT_HCR_APK), SR_TRAP(SYS_APGAKEYHI_EL1, CGT_HCR_APK), /* All _EL2 registers */ - SR_RANGE_TRAP(sys_reg(3, 4, 0, 0, 0), - sys_reg(3, 4, 3, 15, 7), CGT_HCR_NV), + SR_TRAP(SYS_BRBCR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_VPIDR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_VMPIDR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_SCTLR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_ACTLR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_SCTLR2_EL2, CGT_HCR_NV), + SR_RANGE_TRAP(SYS_HCR_EL2, + SYS_HCRX_EL2, CGT_HCR_NV), + SR_TRAP(SYS_SMPRIMAP_EL2, CGT_HCR_NV), + SR_TRAP(SYS_SMCR_EL2, CGT_HCR_NV), + SR_RANGE_TRAP(SYS_TTBR0_EL2, + SYS_TCR2_EL2, CGT_HCR_NV), + SR_TRAP(SYS_VTTBR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_VTCR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_VNCR_EL2, CGT_HCR_NV), + SR_RANGE_TRAP(SYS_HDFGRTR_EL2, + SYS_HAFGRTR_EL2, CGT_HCR_NV), /* Skip the SP_EL1 encoding... */ SR_TRAP(SYS_SPSR_EL2, CGT_HCR_NV), SR_TRAP(SYS_ELR_EL2, CGT_HCR_NV), - SR_RANGE_TRAP(sys_reg(3, 4, 4, 1, 1), - sys_reg(3, 4, 10, 15, 7), CGT_HCR_NV), - SR_RANGE_TRAP(sys_reg(3, 4, 12, 0, 0), - sys_reg(3, 4, 14, 15, 7), CGT_HCR_NV), + /* Skip SPSR_irq, SPSR_abt, SPSR_und, SPSR_fiq */ + SR_TRAP(SYS_AFSR0_EL2, CGT_HCR_NV), + SR_TRAP(SYS_AFSR1_EL2, CGT_HCR_NV), + SR_TRAP(SYS_ESR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_VSESR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_TFSR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_FAR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_HPFAR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_PMSCR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_MAIR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_AMAIR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_MPAMHCR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_MPAMVPMV_EL2, CGT_HCR_NV), + SR_TRAP(SYS_MPAM2_EL2, CGT_HCR_NV), + SR_RANGE_TRAP(SYS_MPAMVPM0_EL2, + SYS_MPAMVPM7_EL2, CGT_HCR_NV), + /* + * Note that the spec. describes a group of MEC registers + * whose access should not trap, therefore skip the following: + * MECID_A0_EL2, MECID_A1_EL2, MECID_P0_EL2, + * MECID_P1_EL2, MECIDR_EL2, VMECID_A_EL2, + * VMECID_P_EL2. 
+ */ + SR_RANGE_TRAP(SYS_VBAR_EL2, + SYS_RMR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_VDISR_EL2, CGT_HCR_NV), + /* ICH_AP0R_EL2 */ + SR_RANGE_TRAP(SYS_ICH_AP0R0_EL2, + SYS_ICH_AP0R3_EL2, CGT_HCR_NV), + /* ICH_AP1R_EL2 */ + SR_RANGE_TRAP(SYS_ICH_AP1R0_EL2, + SYS_ICH_AP1R3_EL2, CGT_HCR_NV), + SR_TRAP(SYS_ICC_SRE_EL2, CGT_HCR_NV), + SR_RANGE_TRAP(SYS_ICH_HCR_EL2, + SYS_ICH_EISR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_ICH_ELRSR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_ICH_VMCR_EL2, CGT_HCR_NV), + /* ICH_LR_EL2 */ + SR_RANGE_TRAP(SYS_ICH_LR0_EL2, + SYS_ICH_LR15_EL2, CGT_HCR_NV), + SR_TRAP(SYS_CONTEXTIDR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_TPIDR_EL2, CGT_HCR_NV), + SR_TRAP(SYS_SCXTNUM_EL2, CGT_HCR_NV), + /* AMEVCNTVOFF0_EL2, AMEVCNTVOFF1_EL2 */ + SR_RANGE_TRAP(SYS_AMEVCNTVOFF0n_EL2(0), + SYS_AMEVCNTVOFF1n_EL2(15), CGT_HCR_NV), + /* CNT*_EL2 */ + SR_TRAP(SYS_CNTVOFF_EL2, CGT_HCR_NV), + SR_TRAP(SYS_CNTPOFF_EL2, CGT_HCR_NV), + SR_TRAP(SYS_CNTHCTL_EL2, CGT_HCR_NV), + SR_RANGE_TRAP(SYS_CNTHP_TVAL_EL2, + SYS_CNTHP_CVAL_EL2, CGT_HCR_NV), + SR_RANGE_TRAP(SYS_CNTHV_TVAL_EL2, + SYS_CNTHV_CVAL_EL2, CGT_HCR_NV), /* All _EL02, _EL12 registers */ SR_RANGE_TRAP(sys_reg(3, 5, 0, 0, 0), sys_reg(3, 5, 10, 15, 7), CGT_HCR_NV), -- cgit v1.2.3 From c7d11a61c7f7de75e4e269485644ea8c5a4e0bf6 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 23 Oct 2023 10:54:43 +0100 Subject: KVM: arm64: Do not let a L1 hypervisor access the *32_EL2 sysregs DBGVCR32_EL2, DACR32_EL2, IFSR32_EL2 and FPEXC32_EL2 are required to UNDEF when AArch32 isn't implemented, which is definitely the case when running NV. Given that this is the only case where these registers can trap, unconditionally inject an UNDEF exception. Signed-off-by: Marc Zyngier Reviewed-by: Oliver Upton Reviewed-by: Eric Auger Link: https://lore.kernel.org/r/20231023095444.1587322-5-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/kvm/sys_regs.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index e92ec810d449..0f8690027a0f 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1961,7 +1961,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { // DBGDTR[TR]X_EL0 share the same encoding { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi }, - { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 }, + { SYS_DESC(SYS_DBGVCR32_EL2), trap_undef, reset_val, DBGVCR32_EL2, 0 }, { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 }, @@ -2380,18 +2380,18 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(VTTBR_EL2, access_rw, reset_val, 0), EL2_REG(VTCR_EL2, access_rw, reset_val, 0), - { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 }, + { SYS_DESC(SYS_DACR32_EL2), trap_undef, reset_unknown, DACR32_EL2 }, EL2_REG(HDFGRTR_EL2, access_rw, reset_val, 0), EL2_REG(HDFGWTR_EL2, access_rw, reset_val, 0), EL2_REG(SPSR_EL2, access_rw, reset_val, 0), EL2_REG(ELR_EL2, access_rw, reset_val, 0), { SYS_DESC(SYS_SP_EL1), access_sp_el1}, - { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 }, + { SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 }, EL2_REG(AFSR0_EL2, access_rw, reset_val, 0), EL2_REG(AFSR1_EL2, access_rw, reset_val, 0), EL2_REG(ESR_EL2, access_rw, reset_val, 0), - { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 }, + { SYS_DESC(SYS_FPEXC32_EL2), trap_undef, reset_val, FPEXC32_EL2, 0x700 }, EL2_REG(FAR_EL2, access_rw, reset_val, 0), EL2_REG(HPFAR_EL2, access_rw, reset_val, 0), -- cgit v1.2.3 From 
3f7915ccc90261c201664b5fdce139db09480a36 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Mon, 23 Oct 2023 10:54:44 +0100 Subject: KVM: arm64: Handle AArch32 SPSR_{irq,abt,und,fiq} as RAZ/WI When trapping accesses from a NV guest that tries to access SPSR_{irq,abt,und,fiq}, make sure we handle them as RAZ/WI, as if AArch32 wasn't implemented. This involves a bit of repainting to make the visibility handler more generic. Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20231023095444.1587322-6-maz@kernel.org Signed-off-by: Oliver Upton --- arch/arm64/include/asm/sysreg.h | 4 ++++ arch/arm64/kvm/sys_regs.c | 16 +++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 4a20a7dc5bc4..5e65f51c10d2 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -505,6 +505,10 @@ #define SYS_SPSR_EL2 sys_reg(3, 4, 4, 0, 0) #define SYS_ELR_EL2 sys_reg(3, 4, 4, 0, 1) #define SYS_SP_EL1 sys_reg(3, 4, 4, 1, 0) +#define SYS_SPSR_irq sys_reg(3, 4, 4, 3, 0) +#define SYS_SPSR_abt sys_reg(3, 4, 4, 3, 1) +#define SYS_SPSR_und sys_reg(3, 4, 4, 3, 2) +#define SYS_SPSR_fiq sys_reg(3, 4, 4, 3, 3) #define SYS_IFSR32_EL2 sys_reg(3, 4, 5, 0, 1) #define SYS_AFSR0_EL2 sys_reg(3, 4, 5, 1, 0) #define SYS_AFSR1_EL2 sys_reg(3, 4, 5, 1, 1) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 0f8690027a0f..16d2b2c4bada 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -1791,8 +1791,8 @@ static unsigned int el2_visibility(const struct kvm_vcpu *vcpu, * HCR_EL2.E2H==1, and only in the sysreg table for convenience of * handling traps. Given that, they are always hidden from userspace. */ -static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu, - const struct sys_reg_desc *rd) +static unsigned int hidden_user_visibility(const struct kvm_vcpu *vcpu, + const struct sys_reg_desc *rd) { return REG_HIDDEN_USER; } @@ -1803,7 +1803,7 @@ static unsigned int elx2_visibility(const struct kvm_vcpu *vcpu, .reset = rst, \ .reg = name##_EL1, \ .val = v, \ - .visibility = elx2_visibility, \ + .visibility = hidden_user_visibility, \ } /* @@ -2387,6 +2387,16 @@ static const struct sys_reg_desc sys_reg_descs[] = { EL2_REG(ELR_EL2, access_rw, reset_val, 0), { SYS_DESC(SYS_SP_EL1), access_sp_el1}, + /* AArch32 SPSR_* are RES0 if trapped from a NV guest */ + { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi, + .visibility = hidden_user_visibility }, + { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi, + .visibility = hidden_user_visibility }, + { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi, + .visibility = hidden_user_visibility }, + { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi, + .visibility = hidden_user_visibility }, + { SYS_DESC(SYS_IFSR32_EL2), trap_undef, reset_unknown, IFSR32_EL2 }, EL2_REG(AFSR0_EL2, access_rw, reset_val, 0), EL2_REG(AFSR1_EL2, access_rw, reset_val, 0), -- cgit v1.2.3 From be097997a273259f1723baac5463cf19d8564efa Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 22 Sep 2023 22:32:29 +0000 Subject: KVM: arm64: Always invalidate TLB for stage-2 permission faults It is possible for multiple vCPUs to fault on the same IPA and attempt to resolve the fault. One of the page table walks will actually update the PTE and the rest will return -EAGAIN per our race detection scheme. KVM elides the TLB invalidation on the racing threads as the return value is nonzero. 
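For illustration, the race-detection scheme boils down to a compare-and-swap on the PTE: every faulting vCPU thread attempts the update, exactly one succeeds, and the losers observe -EAGAIN even though the relaxed permissions are already in place. A rough user-space sketch (hypothetical names; the real logic lives in the stage-2 page-table walker):

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

/* One CAS attempt per faulting thread: either we install the relaxed
 * PTE or someone else already did. In both cases the new descriptor
 * is live, so the caller still needs a local TLB invalidation. */
static int stage2_try_relax_pte(_Atomic uint64_t *ptep,
				uint64_t old, uint64_t new)
{
	if (!atomic_compare_exchange_strong(ptep, &old, new))
		return -EAGAIN;	/* lost the race; PTE already updated */
	return 0;
}

int main(void)
{
	_Atomic uint64_t pte = 0x40;	/* pretend: read-only descriptor */
	int winner = stage2_try_relax_pte(&pte, 0x40, 0xc0);
	int loser = stage2_try_relax_pte(&pte, 0x40, 0xc0);

	return !(winner == 0 && loser == -EAGAIN);
}

The fix below makes both the winning and the losing return paths perform the invalidation.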
Before commit a12ab1378a88 ("KVM: arm64: Use local TLBI on permission relaxation") KVM always used broadcast TLB invalidations when handling permission faults, which had the convenient property of making the stage-2 updates visible to all CPUs in the system. However, now we do a local invalidation, and TLBI elision leads to the vCPU thread faulting again on the stale entry. Remember that the architecture permits the TLB to cache translations that precipitate a permission fault. Invalidate the TLB entry responsible for the permission fault if the stage-2 descriptor has been relaxed, regardless of which thread actually did the job. Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20230922223229.1608155-1-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/hyp/pgtable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/pgtable.c b/arch/arm64/kvm/hyp/pgtable.c index f155b8c9e98c..286888751793 100644 --- a/arch/arm64/kvm/hyp/pgtable.c +++ b/arch/arm64/kvm/hyp/pgtable.c @@ -1314,7 +1314,7 @@ int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level, KVM_PGTABLE_WALK_HANDLE_FAULT | KVM_PGTABLE_WALK_SHARED); - if (!ret) + if (!ret || ret == -EAGAIN) kvm_call_hyp(__kvm_tlb_flush_vmid_ipa_nsh, pgt->mmu, addr, level); return ret; } -- cgit v1.2.3 From beaf35b480875d05d7c751d50951a659ce6dff94 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Sat, 7 Oct 2023 20:40:42 +0800 Subject: KVM: arm64: selftest: Add the missing .guest_prepare() Running page_fault_test on a Cortex-A72 fails with Test: ro_memslot_no_syndrome_guest_cas Testing guest mode: PA-bits:40, VA-bits:48, 4K pages Testing memory backing src type: anonymous ==== Test Assertion Failure ==== aarch64/page_fault_test.c:117: guest_check_lse() pid=1944087 tid=1944087 errno=4 - Interrupted system call 1 0x00000000004028b3: vcpu_run_loop at page_fault_test.c:682 2 0x0000000000402d93: run_test at page_fault_test.c:731 3 0x0000000000403957: for_each_guest_mode at guest_modes.c:100 4 0x00000000004019f3: for_each_test_and_guest_mode at page_fault_test.c:1108 5 (inlined by) main at page_fault_test.c:1134 6 0x0000ffff868e503b: ?? ??:0 7 0x0000ffff868e5113: ?? ??:0 8 0x0000000000401aaf: _start at ??:? guest_check_lse() because we don't have a guest_prepare stage to check the presence of FEAT_LSE and skip the related guest_cas testing, and we end up failing in GUEST_ASSERT(guest_check_lse()). Add the missing .guest_prepare() where it's indeed required.
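For context, guest_check_lse() amounts to reading the Atomic field of ID_AA64ISAR0_EL1 from guest context. A minimal sketch of such a probe, assuming the usual field placement (bits [23:20], where a value of 2 or more advertises FEAT_LSE); Linux also traps and emulates this MRS for EL0, so the snippet doubles as a host program on arm64:

#include <stdbool.h>
#include <stdint.h>

/* FEAT_LSE is advertised via ID_AA64ISAR0_EL1.Atomic (bits [23:20]). */
static bool cpu_has_lse(void)
{
	uint64_t isar0;

	asm volatile("mrs %0, id_aa64isar0_el1" : "=r"(isar0));
	return ((isar0 >> 20) & 0xf) >= 2;
}

int main(void)
{
	return cpu_has_lse() ? 0 : 1;	/* exit 0 when CAS is usable */
}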
Signed-off-by: Zenghui Yu Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231007124043.626-1-yuzenghui@huawei.com Signed-off-by: Oliver Upton --- tools/testing/selftests/kvm/aarch64/page_fault_test.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index 47bb914ab2fa..eb5ead50c0b1 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -842,6 +842,7 @@ static void help(char *name) .name = SCAT2(ro_memslot_no_syndrome, _access), \ .data_memslot_flags = KVM_MEM_READONLY, \ .pt_memslot_flags = KVM_MEM_READONLY, \ + .guest_prepare = { _PREPARE(_access) }, \ .guest_test = _access, \ .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \ .expected_events = { .fail_vcpu_runs = 1 }, \ @@ -865,6 +866,7 @@ static void help(char *name) .name = SCAT2(ro_memslot_no_syn_and_dlog, _access), \ .data_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \ .pt_memslot_flags = KVM_MEM_READONLY | KVM_MEM_LOG_DIRTY_PAGES, \ + .guest_prepare = { _PREPARE(_access) }, \ .guest_test = _access, \ .guest_test_check = { _test_check }, \ .fail_vcpu_run_handler = fail_vcpu_run_mmio_no_syndrome_handler, \ @@ -894,6 +896,7 @@ static void help(char *name) .data_memslot_flags = KVM_MEM_READONLY, \ .pt_memslot_flags = KVM_MEM_READONLY, \ .mem_mark_cmd = CMD_HOLE_DATA | CMD_HOLE_PT, \ + .guest_prepare = { _PREPARE(_access) }, \ .guest_test = _access, \ .uffd_data_handler = _uffd_data_handler, \ .uffd_pt_handler = uffd_pt_handler, \ -- cgit v1.2.3 From 06899aa5dd3d76e888b28d7e8d7304c0a7ec6262 Mon Sep 17 00:00:00 2001 From: Zenghui Yu Date: Sat, 7 Oct 2023 20:40:43 +0800 Subject: KVM: arm64: selftest: Perform ISB before reading PAR_EL1 It looks like a mistake to issue ISB *after* reading PAR_EL1; we should instead perform it between the AT instruction and the reads of PAR_EL1. According to DDI0487J.a IJTYVP, "When an address translation instruction is executed, explicit synchronization is required to guarantee the result is visible to subsequent direct reads of PAR_EL1." Otherwise all guest_at testcases fail on my box with ==== Test Assertion Failure ==== aarch64/page_fault_test.c:142: par & 1 == 0 pid=1355864 tid=1355864 errno=4 - Interrupted system call 1 0x0000000000402853: vcpu_run_loop at page_fault_test.c:681 2 0x0000000000402cdb: run_test at page_fault_test.c:730 3 0x0000000000403897: for_each_guest_mode at guest_modes.c:100 4 0x00000000004019f3: for_each_test_and_guest_mode at page_fault_test.c:1105 5 (inlined by) main at page_fault_test.c:1131 6 0x0000ffffb153c03b: ?? ??:0 7 0x0000ffffb153c113: ?? ??:0 8 0x0000000000401aaf: _start at ??:?
0x1 != 0x0 (par & 1 != 0) Signed-off-by: Zenghui Yu Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231007124043.626-2-yuzenghui@huawei.com Signed-off-by: Oliver Upton --- tools/testing/selftests/kvm/aarch64/page_fault_test.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/kvm/aarch64/page_fault_test.c b/tools/testing/selftests/kvm/aarch64/page_fault_test.c index eb5ead50c0b1..9a3be80e8bc9 100644 --- a/tools/testing/selftests/kvm/aarch64/page_fault_test.c +++ b/tools/testing/selftests/kvm/aarch64/page_fault_test.c @@ -135,8 +135,8 @@ static void guest_at(void) uint64_t par; asm volatile("at s1e1r, %0" :: "r" (guest_test_memory)); - par = read_sysreg(par_el1); isb(); + par = read_sysreg(par_el1); /* Bit 1 indicates whether the AT was successful */ GUEST_ASSERT_EQ(par & 1, 0); -- cgit v1.2.3 From d11974dc5f208ae1c0fe62a9bcb47c0cdcd7b081 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Thu, 26 Oct 2023 20:53:06 +0000 Subject: KVM: arm64: Add tracepoint for MMIO accesses where ISV==0 It is a pretty well-known fact that KVM does not support MMIO emulation without valid instruction syndrome information (ESR_EL2.ISV == 0). The current kvm_pr_unimpl() is pretty useless, as it contains zero context to relate the event to a vCPU. Replace it with a precise tracepoint that dumps the relevant context so the user can make sense of what the guest is doing. Acked-by: Zenghui Yu Acked-by: Marc Zyngier Link: https://lore.kernel.org/r/20231026205306.3045075-1-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/mmio.c | 4 +++- arch/arm64/kvm/trace_arm.h | 25 +++++++++++++++++++++++++ 2 files changed, 28 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kvm/mmio.c b/arch/arm64/kvm/mmio.c index 3dd38a151d2a..200c8019a82a 100644 --- a/arch/arm64/kvm/mmio.c +++ b/arch/arm64/kvm/mmio.c @@ -135,6 +135,9 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) * volunteered to do so, and bail out otherwise.
*/ if (!kvm_vcpu_dabt_isvalid(vcpu)) { + trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu), + kvm_vcpu_get_hfar(vcpu), fault_ipa); + if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER, &vcpu->kvm->arch.flags)) { run->exit_reason = KVM_EXIT_ARM_NISV; @@ -143,7 +146,6 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) return 0; } - kvm_pr_unimpl("Data abort outside memslots with no valid syndrome info\n"); return -ENOSYS; } diff --git a/arch/arm64/kvm/trace_arm.h b/arch/arm64/kvm/trace_arm.h index 8ad53104934d..c18c1a95831e 100644 --- a/arch/arm64/kvm/trace_arm.h +++ b/arch/arm64/kvm/trace_arm.h @@ -136,6 +136,31 @@ TRACE_EVENT(kvm_mmio_emulate, __entry->vcpu_pc, __entry->instr, __entry->cpsr) ); +TRACE_EVENT(kvm_mmio_nisv, + TP_PROTO(unsigned long vcpu_pc, unsigned long esr, + unsigned long far, unsigned long ipa), + TP_ARGS(vcpu_pc, esr, far, ipa), + + TP_STRUCT__entry( + __field( unsigned long, vcpu_pc ) + __field( unsigned long, esr ) + __field( unsigned long, far ) + __field( unsigned long, ipa ) + ), + + TP_fast_assign( + __entry->vcpu_pc = vcpu_pc; + __entry->esr = esr; + __entry->far = far; + __entry->ipa = ipa; + ), + + TP_printk("ipa %#016lx, esr %#016lx, far %#016lx, pc %#016lx", + __entry->ipa, __entry->esr, + __entry->far, __entry->vcpu_pc) +); + + TRACE_EVENT(kvm_set_way_flush, TP_PROTO(unsigned long vcpu_pc, bool cache), TP_ARGS(vcpu_pc, cache), -- cgit v1.2.3 From fbb075c11663a6fb7beb4ef386e069e06594e229 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 27 Oct 2023 00:54:38 +0000 Subject: tools headers arm64: Fix references to top srcdir in Makefile Aishwarya reports that KVM selftests for arm64 fail with the following error: | make[4]: Entering directory '/tmp/kci/linux/tools/testing/selftests/kvm' | Makefile:270: warning: overriding recipe for target | '/tmp/kci/linux/build/kselftest/kvm/get-reg-list' | Makefile:265: warning: ignoring old recipe for target | '/tmp/kci/linux/build/kselftest/kvm/get-reg-list' | make -C ../../../../tools/arch/arm64/tools/ | make[5]: Entering directory '/tmp/kci/linux/tools/arch/arm64/tools' | Makefile:10: ../tools/scripts/Makefile.include: No such file or directory | make[5]: *** No rule to make target '../tools/scripts/Makefile.include'. | Stop. It would appear that this only affects builds from the top-level Makefile (e.g. make kselftest-all), as $(srctree) is set to ".". Work around the issue by shadowing the kselftest naming scheme for the source tree variable. 
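For intuition, each $(patsubst %/,%,$(dir ...)) round in the diff below strips one trailing path component, and four rounds walk from tools/arch/arm64/tools up to the top of the tree. A quick C analogue using the paths from the report above (illustration only):

#include <libgen.h>
#include <stdio.h>

int main(void)
{
	char buf[] = "/tmp/kci/linux/tools/arch/arm64/tools";
	char *p = buf;
	int i;

	/* one dirname() call per $(patsubst %/,%,$(dir ...)) round */
	for (i = 0; i < 4; i++)
		p = dirname(p);

	printf("top_srcdir = %s\n", p);	/* prints /tmp/kci/linux */
	return 0;
}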
Reported-by: Aishwarya TCV Fixes: 0359c946b131 ("tools headers arm64: Update sysreg.h with kernel sources") Link: https://lore.kernel.org/r/20231027005439.3142015-2-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- tools/arch/arm64/tools/Makefile | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/arch/arm64/tools/Makefile b/tools/arch/arm64/tools/Makefile index f867e6036c62..7f64b8bb5107 100644 --- a/tools/arch/arm64/tools/Makefile +++ b/tools/arch/arm64/tools/Makefile @@ -1,13 +1,13 @@ # SPDX-License-Identifier: GPL-2.0 -ifeq ($(srctree),) -srctree := $(patsubst %/,%,$(dir $(CURDIR))) -srctree := $(patsubst %/,%,$(dir $(srctree))) -srctree := $(patsubst %/,%,$(dir $(srctree))) -srctree := $(patsubst %/,%,$(dir $(srctree))) +ifeq ($(top_srcdir),) +top_srcdir := $(patsubst %/,%,$(dir $(CURDIR))) +top_srcdir := $(patsubst %/,%,$(dir $(top_srcdir))) +top_srcdir := $(patsubst %/,%,$(dir $(top_srcdir))) +top_srcdir := $(patsubst %/,%,$(dir $(top_srcdir))) endif -include $(srctree)/tools/scripts/Makefile.include +include $(top_srcdir)/tools/scripts/Makefile.include AWK ?= awk MKDIR ?= mkdir @@ -19,10 +19,10 @@ else Q = @ endif -arm64_tools_dir = $(srctree)/arch/arm64/tools +arm64_tools_dir = $(top_srcdir)/arch/arm64/tools arm64_sysreg_tbl = $(arm64_tools_dir)/sysreg arm64_gen_sysreg = $(arm64_tools_dir)/gen-sysreg.awk -arm64_generated_dir = $(srctree)/tools/arch/arm64/include/generated +arm64_generated_dir = $(top_srcdir)/tools/arch/arm64/include/generated arm64_sysreg_defs = $(arm64_generated_dir)/asm/sysreg-defs.h all: $(arm64_sysreg_defs) -- cgit v1.2.3 From 70c7b704ca7251b09dd4ce22a31b5bea8c797d24 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Fri, 27 Oct 2023 00:54:39 +0000 Subject: KVM: selftests: Avoid using forced target for generating arm64 headers The 'prepare' target that generates the arm64 sysreg headers had no prerequisites, so it wound up forcing a rebuild of all KVM selftests on each invocation. Add a rule for the generated headers and just have dependents use that as a prerequisite.
Reported-by: Nina Schoetterl-Glausch Fixes: 9697d84cc3b6 ("KVM: selftests: Generate sysreg-defs.h and add to include path") Tested-by: Nina Schoetterl-Glausch Link: https://lore.kernel.org/r/20231027005439.3142015-3-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- tools/testing/selftests/kvm/Makefile | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile index 4f4f6ad025f4..4de096bbf124 100644 --- a/tools/testing/selftests/kvm/Makefile +++ b/tools/testing/selftests/kvm/Makefile @@ -22,10 +22,8 @@ arm64_tools_dir := $(top_srcdir)/tools/arch/arm64/tools/ GEN_HDRS := $(top_srcdir)/tools/arch/arm64/include/generated/ CFLAGS += -I$(GEN_HDRS) -prepare: +$(GEN_HDRS): $(wildcard $(arm64_tools_dir)/*) $(MAKE) -C $(arm64_tools_dir) -else -prepare: endif LIBKVM += lib/assert.c @@ -276,10 +274,10 @@ EXTRA_CLEAN += $(GEN_HDRS) \ cscope.* x := $(shell mkdir -p $(sort $(dir $(LIBKVM_C_OBJ) $(LIBKVM_S_OBJ)))) -$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c prepare +$(LIBKVM_C_OBJ): $(OUTPUT)/%.o: %.c $(GEN_HDRS) $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ -$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S prepare +$(LIBKVM_S_OBJ): $(OUTPUT)/%.o: %.S $(GEN_HDRS) $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c $< -o $@ # Compile the string overrides as freestanding to prevent the compiler from @@ -289,9 +287,10 @@ $(LIBKVM_STRING_OBJ): $(OUTPUT)/%.o: %.c $(CC) $(CFLAGS) $(CPPFLAGS) $(TARGET_ARCH) -c -ffreestanding $< -o $@ x := $(shell mkdir -p $(sort $(dir $(TEST_GEN_PROGS)))) +$(SPLIT_TESTS_OBJS): $(GEN_HDRS) $(TEST_GEN_PROGS): $(LIBKVM_OBJS) $(TEST_GEN_PROGS_EXTENDED): $(LIBKVM_OBJS) -$(TEST_GEN_OBJ): prepare +$(TEST_GEN_OBJ): $(GEN_HDRS) cscope: include_paths = $(LINUX_TOOL_INCLUDE) $(LINUX_HDR_PATH) include lib .. cscope: -- cgit v1.2.3