author		Paolo Bonzini <pbonzini@redhat.com>	2023-12-08 19:13:45 +0100
committer	Paolo Bonzini <pbonzini@redhat.com>	2023-12-08 19:13:45 +0100
commit		6254eebad4bedd3ac258a7e0710ec9fb28d8dbe9 (patch)
tree		3625d4c66f8c873754e037f9199f2e44ab1945c4 /arch
parent		Merge tag 'kvm-s390-master-6.7-1' of https://git.kernel.org/pub/scm/linux/ker... (diff)
parent		KVM: x86: Remove 'return void' expression for 'void function' (diff)
Merge tag 'kvm-x86-fixes-6.7-rcN' of https://github.com/kvm-x86/linux into kvm-master
KVM fixes for 6.7-rcN:

 - When checking if a _running_ vCPU is "in-kernel", i.e. running at CPL0,
   get the CPL directly instead of relying on preempted_in_kernel, which
   is valid if and only if the vCPU was preempted, i.e. NOT running.

 - Set .owner for various KVM file_operations so that files refcount the
   KVM module until KVM is done executing _all_ code, including the last
   few instructions of kvm_put_kvm().  And then revert the misguided
   attempt to rely on "struct kvm" refcounts to pin KVM-the-module.

 - Fix a benign "return void" that was recently introduced.
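For background on the .owner fix: with .owner set in a file_operations, the VFS
takes a reference on the owning module at open time and drops it only after
->release() has returned, so the module's code cannot be unloaded while a
callback for one of its files is still executing. Below is a minimal
illustrative sketch of that pattern for a module-provided debugfs-style file;
the example_* names are made up and this is not code from the commit.

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/seq_file.h>

    /* Made-up show routine, just to make the sketch self-contained. */
    static int example_show(struct seq_file *m, void *unused)
    {
            seq_puts(m, "example\n");
            return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
            return single_open(file, example_show, inode->i_private);
    }

    static const struct file_operations example_fops = {
            .owner   = THIS_MODULE,  /* VFS pins this module from open until after release */
            .open    = example_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

That is the property the commit message relies on: the open file itself holds a
reference on the KVM module, rather than the earlier attempt to pin the module
through "struct kvm" refcounts.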
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/debugfs.c	1
-rw-r--r--	arch/x86/kvm/x86.c	9
2 files changed, 7 insertions, 3 deletions
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index ee8c4c3496ed..eea6ea7f14af 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -182,6 +182,7 @@ static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)
}
static const struct file_operations mmu_rmaps_stat_fops = {
+ .owner = THIS_MODULE,
.open = kvm_mmu_rmaps_stat_open,
.read = seq_read,
.llseek = seq_lseek,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c924075f6f1..1a3aaa7dafae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5518,8 +5518,8 @@ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
struct kvm_xsave *guest_xsave)
{
- return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
- sizeof(guest_xsave->region));
+ kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+ sizeof(guest_xsave->region));
}
static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
@@ -13031,7 +13031,10 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
if (vcpu->arch.guest_state_protected)
return true;
- return vcpu->arch.preempted_in_kernel;
+ if (vcpu != kvm_get_running_vcpu())
+ return vcpu->arch.preempted_in_kernel;
+
+ return static_call(kvm_x86_get_cpl)(vcpu) == 0;
}
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
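For context on the preempted_in_kernel change above: the flag is only a
snapshot taken when a vCPU is scheduled out, so it can be stale for the vCPU
that is currently running, which is why the running case now reads the CPL
through the kvm_x86_get_cpl static call. The sketch below illustrates how such
a snapshot gets recorded at preemption time; it paraphrases the idea rather
than quoting the actual kvm_arch_vcpu_put() code, and example_vcpu_put is a
made-up name.

    /*
     * Sketch only: record whether the guest was at CPL0 when the vCPU is
     * scheduled out.  While the vCPU keeps running, this cached value is
     * never refreshed, so a reader asking about the *running* vCPU must
     * query the CPL directly instead.
     */
    static void example_vcpu_put(struct kvm_vcpu *vcpu)
    {
            if (vcpu->preempted && !vcpu->arch.guest_state_protected)
                    vcpu->arch.preempted_in_kernel =
                            static_call(kvm_x86_get_cpl)(vcpu) == 0;
    }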