author		Linus Torvalds <torvalds@linux-foundation.org>	2020-06-12 20:05:52 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-12 20:05:52 +0200
commit		52cd0d972fa6491928add05f11f97a4a59babe92 (patch)
tree		5e53cff155288b4d24c33754905bca4a8504b4bb /virt
parent		Merge tag 'for-linus-5.8b-rc1-tag' of git://git.kernel.org/pub/scm/linux/kern... (diff)
parent		Merge tag 'kvmarm-fixes-5.8-1' of git://git.kernel.org/pub/scm/linux/kernel/g... (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Paolo Bonzini:
 "The guest side of the asynchronous page fault work has been delayed to
  5.9 in order to sync with Thomas's interrupt entry rework, but here's
  the rest of the KVM updates for this merge window.

  MIPS:
   - Loongson port

  PPC:
   - Fixes

  ARM:
   - Fixes

  x86:
   - KVM_SET_USER_MEMORY_REGION optimizations
   - Fixes
   - Selftest fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (62 commits)
  KVM: x86: do not pass poisoned hva to __kvm_set_memory_region
  KVM: selftests: fix sync_with_host() in smm_test
  KVM: async_pf: Inject 'page ready' event only if 'page not present' was previously injected
  KVM: async_pf: Cleanup kvm_setup_async_pf()
  kvm: i8254: remove redundant assignment to pointer s
  KVM: x86: respect singlestep when emulating instruction
  KVM: selftests: Don't probe KVM_CAP_HYPERV_ENLIGHTENED_VMCS when nested VMX is unsupported
  KVM: selftests: do not substitute SVM/VMX check with KVM_CAP_NESTED_STATE check
  KVM: nVMX: Consult only the "basic" exit reason when routing nested exit
  KVM: arm64: Move hyp_symbol_addr() to kvm_asm.h
  KVM: arm64: Synchronize sysreg state on injecting an AArch32 exception
  KVM: arm64: Make vcpu_cp1x() work on Big Endian hosts
  KVM: arm64: Remove host_cpu_context member from vcpu structure
  KVM: arm64: Stop sparse from moaning at __hyp_this_cpu_ptr
  KVM: arm64: Handle PtrAuth traps early
  KVM: x86: Unexport x86_fpu_cache and make it static
  KVM: selftests: Ignore KVM 5-level paging support for VM_MODE_PXXV48_4K
  KVM: arm64: Save the host's PtrAuth keys in non-preemptible context
  KVM: arm64: Stop save/restoring ACTLR_EL1
  KVM: arm64: Add emulation for 32bit guests accessing ACTLR2
  ...
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/async_pf.c	21
-rw-r--r--	virt/kvm/kvm_main.c	53
2 files changed, 35 insertions(+), 39 deletions(-)
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 5f8f3e8b5add..45799606bb3e 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -164,7 +164,9 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
return 0;
- /* setup delayed work */
+ /* Arch specific code should not do async PF in this case */
+ if (unlikely(kvm_is_error_hva(hva)))
+ return 0;
/*
* do alloc nowait since if we are going to sleep anyway we
@@ -183,24 +185,15 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
mmget(work->mm);
kvm_get_kvm(work->vcpu->kvm);
- /* this can't really happen otherwise gfn_to_pfn_async
- would succeed */
- if (unlikely(kvm_is_error_hva(work->addr)))
- goto retry_sync;
-
INIT_WORK(&work->work, async_pf_execute);
- if (!schedule_work(&work->work))
- goto retry_sync;
list_add_tail(&work->queue, &vcpu->async_pf.queue);
vcpu->async_pf.queued++;
- kvm_arch_async_page_not_present(vcpu, work);
+ work->notpresent_injected = kvm_arch_async_page_not_present(vcpu, work);
+
+ schedule_work(&work->work);
+
return 1;
-retry_sync:
- kvm_put_kvm(work->vcpu->kvm);
- mmput(work->mm);
- kmem_cache_free(async_pf_cache, work);
- return 0;
}
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)
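
The async_pf.c change above rejects an error hva up front, records whether the
'page not present' event was actually delivered in work->notpresent_injected,
and only then queues the work item; since the work was just initialized with
INIT_WORK(), schedule_work() cannot find it already pending, so the old
retry_sync unwind path is no longer needed. A minimal, illustrative sketch of
how the new flag can be consumed when completing an async page fault (a
sketch only, not the exact upstream code):

	/* Sketch: inject 'page ready' for this work item only if the matching
	 * 'page not present' event was injected earlier, or if this is a
	 * wakeup-all entry. */
	if (work->wakeup_all || work->notpresent_injected)
		kvm_arch_async_page_present(vcpu, work);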
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 0dfee7576e88..a852af5c3214 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -154,10 +154,9 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
static unsigned long long kvm_createvm_count;
static unsigned long long kvm_active_vms;
-__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end, bool blockable)
+__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end)
{
- return 0;
}
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn)
@@ -383,6 +382,18 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
return container_of(mn, struct kvm, mmu_notifier);
}
+static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ struct kvm *kvm = mmu_notifier_to_kvm(mn);
+ int idx;
+
+ idx = srcu_read_lock(&kvm->srcu);
+ kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+ srcu_read_unlock(&kvm->srcu, idx);
+}
+
static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long address,
@@ -407,7 +418,6 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
{
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int need_tlb_flush = 0, idx;
- int ret;
idx = srcu_read_lock(&kvm->srcu);
spin_lock(&kvm->mmu_lock);
@@ -424,14 +434,9 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
kvm_flush_remote_tlbs(kvm);
spin_unlock(&kvm->mmu_lock);
-
- ret = kvm_arch_mmu_notifier_invalidate_range(kvm, range->start,
- range->end,
- mmu_notifier_range_blockable(range));
-
srcu_read_unlock(&kvm->srcu, idx);
- return ret;
+ return 0;
}
static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -537,6 +542,7 @@ static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
}
static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
+ .invalidate_range = kvm_mmu_notifier_invalidate_range,
.invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
.invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
.clear_flush_young = kvm_mmu_notifier_clear_flush_young,
@@ -2970,7 +2976,6 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
{
struct kvm_vcpu *vcpu = filp->private_data;
- debugfs_remove_recursive(vcpu->debugfs_dentry);
kvm_put_kvm(vcpu->kvm);
return 0;
}
@@ -2997,16 +3002,17 @@ static int create_vcpu_fd(struct kvm_vcpu *vcpu)
static void kvm_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
+ struct dentry *debugfs_dentry;
char dir_name[ITOA_MAX_LEN * 2];
if (!debugfs_initialized())
return;
snprintf(dir_name, sizeof(dir_name), "vcpu%d", vcpu->vcpu_id);
- vcpu->debugfs_dentry = debugfs_create_dir(dir_name,
- vcpu->kvm->debugfs_dentry);
+ debugfs_dentry = debugfs_create_dir(dir_name,
+ vcpu->kvm->debugfs_dentry);
- kvm_arch_create_vcpu_debugfs(vcpu);
+ kvm_arch_create_vcpu_debugfs(vcpu, debugfs_dentry);
#endif
}
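
The debugfs hunks above stop caching the per-vcpu dentry in struct kvm_vcpu:
the directory is created into a local variable and handed straight to
kvm_arch_create_vcpu_debugfs(), and the explicit debugfs_remove_recursive()
in kvm_vcpu_release() goes away. A hedged sketch of an arch-side
implementation matching the new two-argument signature (the file name and
fops below are placeholders, not necessarily any real arch's code):

	void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu,
					  struct dentry *debugfs_dentry)
	{
		/* Create per-vcpu files under the directory passed in by
		 * generic code instead of dereferencing vcpu->debugfs_dentry.
		 * "example-stat" and example_stat_fops are placeholders. */
		debugfs_create_file("example-stat", 0444, debugfs_dentry,
				    vcpu, &example_stat_fops);
	}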
@@ -3743,21 +3749,18 @@ static long kvm_vm_ioctl(struct file *filp,
if (routing.flags)
goto out;
if (routing.nr) {
- r = -ENOMEM;
- entries = vmalloc(array_size(sizeof(*entries),
- routing.nr));
- if (!entries)
- goto out;
- r = -EFAULT;
urouting = argp;
- if (copy_from_user(entries, urouting->entries,
- routing.nr * sizeof(*entries)))
- goto out_free_irq_routing;
+ entries = vmemdup_user(urouting->entries,
+ array_size(sizeof(*entries),
+ routing.nr));
+ if (IS_ERR(entries)) {
+ r = PTR_ERR(entries);
+ goto out;
+ }
}
r = kvm_set_irq_routing(kvm, entries, routing.nr,
routing.flags);
-out_free_irq_routing:
- vfree(entries);
+ kvfree(entries);
break;
}
#endif /* CONFIG_HAVE_KVM_IRQ_ROUTING */
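
The final hunk swaps the open-coded vmalloc() + copy_from_user() pair (with
its -ENOMEM/-EFAULT unwinding) for vmemdup_user(), which allocates and copies
in one step and returns an ERR_PTR on failure; because vmemdup_user() may hand
back either kmalloc'd or vmalloc'd memory, the buffer is released with
kvfree() instead of vfree(). A minimal sketch of the idiom, reusing the names
from the diff:

	entries = vmemdup_user(urouting->entries,
			       array_size(sizeof(*entries), routing.nr));
	if (IS_ERR(entries)) {
		r = PTR_ERR(entries);
		goto out;
	}
	/* ... hand 'entries' to kvm_set_irq_routing() ... */
	kvfree(entries);	/* pairs with vmemdup_user() */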