summaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorSean Christopherson <seanjc@google.com>2021-11-11 03:07:33 +0100
committerPeter Zijlstra <peterz@infradead.org>2021-11-17 14:49:10 +0100
commite1bfc24577cc65c95dc519d7621a9c985b97e567 (patch)
treee9c8dd7f6ec898bd5c699cdb923ebeb1b9193de7 /arch/x86/kvm/x86.c
parentKVM: x86: More precisely identify NMI from guest when handling PMI (diff)
downloadlinux-e1bfc24577cc65c95dc519d7621a9c985b97e567.tar.xz
linux-e1bfc24577cc65c95dc519d7621a9c985b97e567.zip
KVM: Move x86's perf guest info callbacks to generic KVM
Move x86's perf guest callbacks into common KVM, as they are semantically identical to arm64's callbacks (the only other such KVM callbacks). arm64 will convert to the common versions in a future patch.

Implement the necessary arm64 arch hooks now to avoid having to provide stubs or a temporary #define (from x86) to avoid arm64 compilation errors when CONFIG_GUEST_PERF_EVENTS=y.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211111020738.2512932-13-seanjc@google.com
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c53
1 files changed, 11 insertions, 42 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ab032ef7879f..32cb6f9ca077 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8469,43 +8469,12 @@ static void kvm_timer_init(void)
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}
-static inline bool kvm_pmi_in_guest(struct kvm_vcpu *vcpu)
-{
- return vcpu && vcpu->arch.handling_intr_from_guest;
-}
-
-static unsigned int kvm_guest_state(void)
-{
- struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
- unsigned int state;
-
- if (!kvm_pmi_in_guest(vcpu))
- return 0;
-
- state = PERF_GUEST_ACTIVE;
- if (static_call(kvm_x86_get_cpl)(vcpu))
- state |= PERF_GUEST_USER;
-
- return state;
-}
-
-static unsigned long kvm_guest_get_ip(void)
-{
- struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
-
- /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
- if (WARN_ON_ONCE(!kvm_pmi_in_guest(vcpu)))
- return 0;
-
- return kvm_rip_read(vcpu);
-}
-
static unsigned int kvm_handle_intel_pt_intr(void)
{
struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
/* '0' on failure so that the !PT case can use a RET0 static call. */
- if (!kvm_pmi_in_guest(vcpu))
+ if (!kvm_arch_pmi_in_guest(vcpu))
return 0;
kvm_make_request(KVM_REQ_PMI, vcpu);
@@ -8514,12 +8483,6 @@ static unsigned int kvm_handle_intel_pt_intr(void)
return 1;
}
-static struct perf_guest_info_callbacks kvm_guest_cbs = {
- .state = kvm_guest_state,
- .get_ip = kvm_guest_get_ip,
- .handle_intel_pt_intr = NULL,
-};
-
#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
@@ -11229,9 +11192,11 @@ int kvm_arch_hardware_setup(void *opaque)
memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops));
kvm_ops_static_call_update();
+ /* Temporary ugliness. */
if (ops->intel_pt_intr_in_guest && ops->intel_pt_intr_in_guest())
- kvm_guest_cbs.handle_intel_pt_intr = kvm_handle_intel_pt_intr;
- perf_register_guest_info_callbacks(&kvm_guest_cbs);
+ kvm_register_perf_callbacks(kvm_handle_intel_pt_intr);
+ else
+ kvm_register_perf_callbacks(NULL);
if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES))
supported_xss = 0;
@@ -11260,8 +11225,7 @@ int kvm_arch_hardware_setup(void *opaque)
void kvm_arch_hardware_unsetup(void)
{
- perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
- kvm_guest_cbs.handle_intel_pt_intr = NULL;
+ kvm_unregister_perf_callbacks();
static_call(kvm_x86_hardware_unsetup)();
}
@@ -11852,6 +11816,11 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
return vcpu->arch.preempted_in_kernel;
}
+unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
+{
+ return kvm_rip_read(vcpu);
+}
+
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;