author		Linus Torvalds <torvalds@linux-foundation.org>	2018-08-19 19:38:36 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-19 19:38:36 +0200
commit		e61cf2e3a5b452cfefcb145021f5a8ea88735cc1 (patch)
tree		bbabaf0d4753d6880ecbaddd8daa0164d49c1c61 /virt
parent		Merge tag 'riscv-for-linus-4.19-mw0' of git://git.kernel.org/pub/scm/linux/ke... (diff)
parent		kvm: x86: Set highest physical address bits in non-present/reserved SPTEs (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull first set of KVM updates from Paolo Bonzini:
"PPC:
- minor code cleanups
x86:
- PCID emulation and CR3 caching for shadow page tables
- nested VMX live migration
- nested VMCS shadowing
- optimized IPI hypercall
- some optimizations
ARM will come next week"
* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (85 commits)
kvm: x86: Set highest physical address bits in non-present/reserved SPTEs
KVM/x86: Use CC_SET()/CC_OUT in arch/x86/kvm/vmx.c
KVM: X86: Implement PV IPIs in linux guest
KVM: X86: Add kvm hypervisor init time platform setup callback
KVM: X86: Implement "send IPI" hypercall
KVM/x86: Move X86_CR4_OSXSAVE check into kvm_valid_sregs()
KVM: x86: Skip pae_root shadow allocation if tdp enabled
KVM/MMU: Combine flushing remote tlb in mmu_set_spte()
KVM: vmx: skip VMWRITE of HOST_{FS,GS}_BASE when possible
KVM: vmx: skip VMWRITE of HOST_{FS,GS}_SEL when possible
KVM: vmx: always initialize HOST_{FS,GS}_BASE to zero during setup
KVM: vmx: move struct host_state usage to struct loaded_vmcs
KVM: vmx: compute need to reload FS/GS/LDT on demand
KVM: nVMX: remove a misleading comment regarding vmcs02 fields
KVM: vmx: rename __vmx_load_host_state() and vmx_save_host_state()
KVM: vmx: add dedicated utility to access guest's kernel_gs_base
KVM: vmx: track host_state.loaded using a loaded_vmcs pointer
KVM: vmx: refactor segmentation code in vmx_save_host_state()
kvm: nVMX: Fix fault priority for VMX operations
kvm: nVMX: Fix fault vector for VMX operation at CPL > 0
...
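Among the commits above, the PV IPI work ("KVM: X86: Implement "send IPI" hypercall" and its guest-side counterpart) replaces one APIC ICR write per destination CPU with a single hypercall that carries a bitmap of destinations. Below is a minimal sketch of the guest-side call pattern, assuming the KVM_HC_SEND_IPI ABI described in this series (low and high halves of the destination APIC-ID bitmap, the lowest APIC ID the bitmap covers, and the ICR value); the helper name is illustrative, not the actual kernel function.

#include <linux/kvm_para.h>	/* kvm_hypercall4(), KVM_HC_SEND_IPI */

/*
 * Illustrative helper (hypothetical name): ask the hypervisor to
 * deliver 'icr' (vector plus delivery bits) to every CPU whose APIC
 * ID, counted upward from 'min', has its bit set in the 128-bit
 * bitmap.  One VM exit replaces up to 128 separate ICR writes.
 */
static long pv_send_ipi(unsigned long bitmap_low, unsigned long bitmap_high,
			unsigned long min, unsigned long icr)
{
	return kvm_hypercall4(KVM_HC_SEND_IPI, bitmap_low, bitmap_high,
			      min, icr);
}

The real guest implementation lives in arch/x86/kernel/kvm.c and is wired up through the "kvm hypervisor init time platform setup callback" commit listed above.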
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/kvm_main.c	33
1 file changed, 19 insertions(+), 14 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 3d233ebfbee9..9263ead9fd32 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -273,7 +273,8 @@ void kvm_flush_remote_tlbs(struct kvm *kvm)
	 * kvm_make_all_cpus_request() reads vcpu->mode. We reuse that
	 * barrier here.
	 */
-	if (kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
+	if (!kvm_arch_flush_remote_tlb(kvm)
+	    || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
 	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
@@ -1169,7 +1170,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 
 	n = kvm_dirty_bitmap_bytes(memslot);
 
-	dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
+	dirty_bitmap_buffer = kvm_second_dirty_bitmap(memslot);
 	memset(dirty_bitmap_buffer, 0, n);
 
 	spin_lock(&kvm->mmu_lock);
@@ -1342,18 +1343,16 @@ static inline int check_user_page_hwpoison(unsigned long addr)
 }
 
 /*
- * The atomic path to get the writable pfn which will be stored in @pfn,
- * true indicates success, otherwise false is returned.
+ * The fast path to get the writable pfn which will be stored in @pfn,
+ * true indicates success, otherwise false is returned.  It's also the
+ * only part that runs if we can be in atomic context.
  */
-static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
-			    bool write_fault, bool *writable, kvm_pfn_t *pfn)
+static bool hva_to_pfn_fast(unsigned long addr, bool write_fault,
+			    bool *writable, kvm_pfn_t *pfn)
 {
 	struct page *page[1];
 	int npages;
 
-	if (!(async || atomic))
-		return false;
-
 	/*
 	 * Fast pin a writable pfn only if it is a write fault request
 	 * or the caller allows to map a writable pfn for a read fault
@@ -1497,7 +1496,7 @@ static kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	/* we can do it either atomically or asynchronously, not both */
 	BUG_ON(atomic && async);
 
-	if (hva_to_pfn_fast(addr, atomic, async, write_fault, writable, &pfn))
+	if (hva_to_pfn_fast(addr, write_fault, writable, &pfn))
 		return pfn;
 
 	if (atomic)
@@ -2127,16 +2126,22 @@ static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
 
 static int kvm_vcpu_check_block(struct kvm_vcpu *vcpu)
 {
+	int ret = -EINTR;
+	int idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (kvm_arch_vcpu_runnable(vcpu)) {
 		kvm_make_request(KVM_REQ_UNHALT, vcpu);
-		return -EINTR;
+		goto out;
 	}
 	if (kvm_cpu_has_pending_timer(vcpu))
-		return -EINTR;
+		goto out;
 	if (signal_pending(current))
-		return -EINTR;
+		goto out;
 
-	return 0;
+	ret = 0;
+out:
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+	return ret;
 }
 
 /*
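Two of these hunks are worth a closer look. The first lets an architecture flush remote TLBs itself (for example through a paravirtual interface) before the generic code falls back to broadcasting KVM_REQ_TLB_FLUSH to every vCPU; the last one wraps kvm_vcpu_check_block() in an SRCU read-side critical section so the callbacks it invokes can safely dereference SRCU-protected VM state while halt-polling. A sketch of the generic default for the new hook, modeled on what this series adds to include/linux/kvm_host.h (the guard macro name is taken on trust here), shows why the new condition reads the way it does:

/*
 * Sketch of the generic fallback for the new arch hook.  A nonzero
 * return means "not handled here", so kvm_flush_remote_tlbs() goes on
 * to broadcast KVM_REQ_TLB_FLUSH; an architecture that can flush
 * remote TLBs directly defines __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB,
 * provides its own implementation, and returns 0 on success, which
 * short-circuits the broadcast.
 */
#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

Either way, kvm->stat.remote_tlb_flush is only incremented when a flush actually happened, which is why the arch path and the broadcast path are ORed together in the rewritten condition.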