author     Linus Torvalds <torvalds@linux-foundation.org>  2023-12-10 19:46:46 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-12-10 19:46:46 +0100
commit     0aea22c7ab05f9dfebbccf265a399331435b8938 (patch)
tree       cd0038f4c03f949155aebbfd97b525757d218c42 /arch
parent     Merge tag 'powerpc-6.7-4' of git://git.kernel.org/pub/scm/linux/kernel/git/po... (diff)
parent     KVM: SVM: Update EFER software model on CR0 trap for SEV-ES (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull kvm fixes from Paolo Bonzini:
"Generic:
- Set .owner for various KVM file_operations so that files refcount
the KVM module until KVM is done executing _all_ code, including
the last few instructions of kvm_put_kvm(). And then revert the
misguided attempt to rely on "struct kvm" refcounts to pin
KVM-the-module.
ARM:
- Do not redo the mapping of vLPIs, if they have already been mapped
s390:
- Do not leave bits behind in PTEs
- Properly catch page invalidations that affect the prefix of a
nested guest
x86:
- When checking if a _running_ vCPU is "in-kernel", i.e. running at
CPL0, get the CPL directly instead of relying on
preempted_in_kernel (which is valid if and only if the vCPU was
preempted, i.e. NOT running).
- Fix a benign "return void" that was recently introduced.
Selftests:
- Makefile tweak for dependency generation
- '-Wformat' fix"
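The "Generic" item above refers to the standard way a module keeps itself alive while userspace still holds one of its files: every file_operations the module exposes must set .owner, so the VFS takes a reference on the module when the file is opened and drops it only after the last file callback has returned. A minimal sketch of that pattern (illustrative only; the example_* names are hypothetical and this is not the KVM code being merged):

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello\n");
        return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_show, NULL);
}

static const struct file_operations example_fops = {
        .owner   = THIS_MODULE,  /* VFS pins this module while the file is open */
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

With .owner left unset, nothing prevents the module from being unloaded while one of these callbacks is still executing, which is the class of use-after-free the .owner fix in this pull closes for KVM's own file_operations.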
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: SVM: Update EFER software model on CR0 trap for SEV-ES
KVM: selftests: add -MP to CFLAGS
KVM: selftests: Actually print out magic token in NX hugepages skip message
KVM: x86: Remove 'return void' expression for 'void function'
Revert "KVM: Prevent module exit until all VMs are freed"
KVM: Set file_operations.owner appropriately for all such structures
KVM: x86: Get CPL directly when checking if loaded vCPU is in kernel mode
KVM: arm64: GICv4: Do not perform a map to a mapped vLPI
KVM: s390/mm: Properly reset no-dat
KVM: s390: vsie: fix wrong VIR 37 when MSO is used
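On the x86 side, the reason for the "in-kernel" change is that preempted_in_kernel is a snapshot recorded when a vCPU is scheduled out, so it says nothing useful about the vCPU that is currently loaded and running. The new logic has this shape (taken from the arch/x86/kvm/x86.c hunk in the diff below, with explanatory comments added):

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.guest_state_protected)
                return true;                            /* CPL is not visible for protected guests */

        if (vcpu != kvm_get_running_vcpu())
                return vcpu->arch.preempted_in_kernel;  /* snapshot taken at preemption time */

        return static_call(kvm_x86_get_cpl)(vcpu) == 0; /* live CPL of the currently loaded vCPU */
}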
Diffstat (limited to 'arch')
-rw-r--r--  arch/arm64/kvm/vgic/vgic-v4.c | 4 ++++
-rw-r--r--  arch/s390/kvm/vsie.c          | 4 ----
-rw-r--r--  arch/s390/mm/pgtable.c        | 2 +-
-rw-r--r--  arch/x86/kvm/debugfs.c        | 1 +
-rw-r--r--  arch/x86/kvm/svm/svm.c        | 8 +++++---
-rw-r--r--  arch/x86/kvm/x86.c            | 9 ++++++---
6 files changed, 17 insertions(+), 11 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index 339a55194b2c..74a67ad87f29 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -436,6 +436,10 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
         if (ret)
                 goto out;
 
+        /* Silently exit if the vLPI is already mapped */
+        if (irq->hw)
+                goto out;
+
         /*
          * Emit the mapping request. If it fails, the ITS probably
          * isn't v4 compatible, so let's silently bail out. Holding
diff --git a/arch/s390/kvm/vsie.c b/arch/s390/kvm/vsie.c
index 02dcbe82a8e5..8207a892bbe2 100644
--- a/arch/s390/kvm/vsie.c
+++ b/arch/s390/kvm/vsie.c
@@ -587,10 +587,6 @@ void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
 
         if (!gmap_is_shadow(gmap))
                 return;
-        if (start >= 1UL << 31)
-                /* We are only interested in prefix pages */
-                return;
-
         /*
          * Only new shadow blocks are added to the list during runtime,
          * therefore we can safely reference them all the time.
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 3bd2ab2a9a34..5cb92941540b 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -756,7 +756,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
                 pte_clear(mm, addr, ptep);
         }
         if (reset)
-                pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
+                pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
         pgste_set_unlock(ptep, pgste);
         preempt_enable();
 }
diff --git a/arch/x86/kvm/debugfs.c b/arch/x86/kvm/debugfs.c
index ee8c4c3496ed..eea6ea7f14af 100644
--- a/arch/x86/kvm/debugfs.c
+++ b/arch/x86/kvm/debugfs.c
@@ -182,6 +182,7 @@ static int kvm_mmu_rmaps_stat_release(struct inode *inode, struct file *file)
 }
 
 static const struct file_operations mmu_rmaps_stat_fops = {
+        .owner = THIS_MODULE,
         .open = kvm_mmu_rmaps_stat_open,
         .read = seq_read,
         .llseek = seq_lseek,
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 712146312358..f3bb30b40876 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1855,15 +1855,17 @@ void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
         bool old_paging = is_paging(vcpu);
 
 #ifdef CONFIG_X86_64
-        if (vcpu->arch.efer & EFER_LME && !vcpu->arch.guest_state_protected) {
+        if (vcpu->arch.efer & EFER_LME) {
                 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
                         vcpu->arch.efer |= EFER_LMA;
-                        svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
+                        if (!vcpu->arch.guest_state_protected)
+                                svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
                 }
 
                 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
                         vcpu->arch.efer &= ~EFER_LMA;
-                        svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
+                        if (!vcpu->arch.guest_state_protected)
+                                svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
                 }
         }
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2c924075f6f1..1a3aaa7dafae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5518,8 +5518,8 @@ static void kvm_vcpu_ioctl_x86_get_xsave2(struct kvm_vcpu *vcpu,
 static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
                                          struct kvm_xsave *guest_xsave)
 {
-        return kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
-                                             sizeof(guest_xsave->region));
+        kvm_vcpu_ioctl_x86_get_xsave2(vcpu, (void *)guest_xsave->region,
+                                      sizeof(guest_xsave->region));
 }
 
 static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
@@ -13031,7 +13031,10 @@ bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
         if (vcpu->arch.guest_state_protected)
                 return true;
 
-        return vcpu->arch.preempted_in_kernel;
+        if (vcpu != kvm_get_running_vcpu())
+                return vcpu->arch.preempted_in_kernel;
+
+        return static_call(kvm_x86_get_cpl)(vcpu) == 0;
 }
 
 unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
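The svm.c change above follows the usual SEV-ES rule: KVM's software model of guest state (here vcpu->arch.efer) must track the architectural value even when the hardware copy cannot be written, because an SEV-ES guest's VMSA is encrypted and not writable by the hypervisor. A hypothetical helper showing the pattern (illustration only; example_set_efer_lma is not in-tree code):

static void example_set_efer_lma(struct kvm_vcpu *vcpu, bool lma)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        /* Software model: always kept in sync with the architectural state. */
        if (lma)
                vcpu->arch.efer |= EFER_LMA;
        else
                vcpu->arch.efer &= ~EFER_LMA;

        /* Hardware copy: skipped when the VMSA is encrypted (SEV-ES). */
        if (vcpu->arch.guest_state_protected)
                return;

        if (lma)
                svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
        else
                svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
}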