author     Linus Torvalds <torvalds@linux-foundation.org>  2013-01-10 18:05:18 +0100
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-01-10 18:05:18 +0100
commit     ccae663cd4f62890d862c660e5ed762eb9821c14 (patch)
tree       7c9f110db7992aad60fcc01718a827affc4f33df /arch
parent     Merge tag 'trace-3.8-rc2-regression-fix' of git://git.kernel.org/pub/scm/linu... (diff)
parent     KVM: x86: use dynamic percpu allocations for shared msrs area (diff)
Merge git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM bugfixes from Marcelo Tosatti.
* git://git.kernel.org/pub/scm/virt/kvm/kvm:
KVM: x86: use dynamic percpu allocations for shared msrs area
KVM: PPC: Book3S HV: Fix compilation without CONFIG_PPC_POWERNV
powerpc: Corrected include header path in kvm_para.h
Add rcu user eqs exception hooks for async page fault
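For context on the first item in the list: the shared-MSR fix replaces a static DEFINE_PER_CPU() array with memory obtained at runtime via alloc_percpu(), keeping the kvm_shared_msrs area out of the statically reserved percpu region. The fragment below is a minimal, hypothetical module sketch of that allocation pattern only; demo_stats and its events field are illustrative names, not code from the patch.

/* Sketch: dynamic percpu allocation, mirroring the shared_msrs change. */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/types.h>

struct demo_stats {
	u64 events;
};

/* A pointer filled by alloc_percpu(), instead of DEFINE_PER_CPU storage. */
static struct demo_stats __percpu *demo_stats;

static int __init demo_init(void)
{
	unsigned int cpu;

	demo_stats = alloc_percpu(struct demo_stats);
	if (!demo_stats)
		return -ENOMEM;	/* allocation can fail, unlike static percpu */

	/* Each CPU's instance is reached through per_cpu_ptr(). */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(demo_stats, cpu)->events = 0;

	return 0;
}

static void __exit demo_exit(void)
{
	free_percpu(demo_stats);	/* every alloc_percpu() needs this */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");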
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/uapi/asm/kvm_para.h |  2 +-
-rw-r--r--	arch/powerpc/kvm/book3s_hv_ras.c         |  4 ++++
-rw-r--r--	arch/x86/kernel/kvm.c                    | 12 ++++++++++--
-rw-r--r--	arch/x86/kvm/x86.c                       | 24 ++++++++++++++++++------
4 files changed, 33 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/include/uapi/asm/kvm_para.h b/arch/powerpc/include/uapi/asm/kvm_para.h
index ed0e0254b47f..e3af3286a068 100644
--- a/arch/powerpc/include/uapi/asm/kvm_para.h
+++ b/arch/powerpc/include/uapi/asm/kvm_para.h
@@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared {
 
 #define KVM_HCALL_TOKEN(num)	_EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num)
 
-#include <uapi/asm/epapr_hcalls.h>
+#include <asm/epapr_hcalls.h>
 
 #define KVM_FEATURE_MAGIC_PAGE	1
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 35f3cf0269b3..a353c485808c 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -79,7 +79,9 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu)
 static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 {
 	unsigned long srr1 = vcpu->arch.shregs.msr;
+#ifdef CONFIG_PPC_POWERNV
 	struct opal_machine_check_event *opal_evt;
+#endif
 	long handled = 1;
 
 	if (srr1 & SRR1_MC_LDSTERR) {
@@ -117,6 +119,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 		handled = 0;
 	}
 
+#ifdef CONFIG_PPC_POWERNV
 	/*
 	 * See if OPAL has already handled the condition.
 	 * We assume that if the condition is recovered then OPAL
@@ -131,6 +134,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
 
 	if (handled)
 		opal_evt->in_use = 0;
+#endif
 
 	return handled;
 }
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 08b973f64032..9c2bd8bd4b4c 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -43,6 +43,7 @@
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
 #include <asm/kvm_guest.h>
+#include <asm/context_tracking.h>
 
 static int kvmapf = 1;
 
@@ -121,6 +122,8 @@ void kvm_async_pf_task_wait(u32 token)
 	struct kvm_task_sleep_node n, *e;
 	DEFINE_WAIT(wait);
 
+	rcu_irq_enter();
+
 	spin_lock(&b->lock);
 	e = _find_apf_task(b, token);
 	if (e) {
@@ -128,6 +131,8 @@ void kvm_async_pf_task_wait(u32 token)
 		hlist_del(&e->link);
 		kfree(e);
 		spin_unlock(&b->lock);
+
+		rcu_irq_exit();
 		return;
 	}
 
@@ -152,13 +157,16 @@ void kvm_async_pf_task_wait(u32 token)
 			/*
 			 * We cannot reschedule. So halt.
 			 */
+			rcu_irq_exit();
 			native_safe_halt();
+			rcu_irq_enter();
 			local_irq_disable();
 		}
 	}
 	if (!n.halted)
 		finish_wait(&n.wq, &wait);
 
+	rcu_irq_exit();
 	return;
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wait);
@@ -252,10 +260,10 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
 		break;
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		/* page is swapped out by the host. */
-		rcu_irq_enter();
+		exception_enter(regs);
 		exit_idle();
 		kvm_async_pf_task_wait((u32)read_cr2());
-		rcu_irq_exit();
+		exception_exit(regs);
 		break;
 	case KVM_PV_REASON_PAGE_READY:
 		rcu_irq_enter();
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 76f54461f7cb..c243b81e3c74 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -120,7 +120,7 @@ struct kvm_shared_msrs {
 };
 
 static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
-static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs);
+static struct kvm_shared_msrs __percpu *shared_msrs;
 
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "pf_fixed", VCPU_STAT(pf_fixed) },
@@ -191,10 +191,10 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
 
 static void shared_msr_update(unsigned slot, u32 msr)
 {
-	struct kvm_shared_msrs *smsr;
 	u64 value;
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
-	smsr = &__get_cpu_var(shared_msrs);
 	/* only read, and nobody should modify it at this time,
 	 * so don't need lock */
 	if (slot >= shared_msrs_global.nr) {
@@ -226,7 +226,8 @@ static void kvm_shared_msr_cpu_online(void)
 
 void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
 {
-	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
 	if (((value ^ smsr->values[slot].curr) & mask) == 0)
 		return;
@@ -242,7 +243,8 @@ EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
 
 static void drop_user_return_notifiers(void *ignore)
 {
-	struct kvm_shared_msrs *smsr = &__get_cpu_var(shared_msrs);
+	unsigned int cpu = smp_processor_id();
+	struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
 
 	if (smsr->registered)
 		kvm_on_user_return(&smsr->urn);
@@ -5233,9 +5235,16 @@ int kvm_arch_init(void *opaque)
 		goto out;
 	}
 
+	r = -ENOMEM;
+	shared_msrs = alloc_percpu(struct kvm_shared_msrs);
+	if (!shared_msrs) {
+		printk(KERN_ERR "kvm: failed to allocate percpu kvm_shared_msrs\n");
+		goto out;
+	}
+
 	r = kvm_mmu_module_init();
 	if (r)
-		goto out;
+		goto out_free_percpu;
 
 	kvm_set_mmio_spte_mask();
 	kvm_init_msr_list();
@@ -5258,6 +5267,8 @@ int kvm_arch_init(void *opaque)
 
 	return 0;
 
+out_free_percpu:
+	free_percpu(shared_msrs);
 out:
 	return r;
 }
@@ -5275,6 +5286,7 @@ void kvm_arch_exit(void)
 #endif
 	kvm_x86_ops = NULL;
 	kvm_mmu_module_exit();
+	free_percpu(shared_msrs);
 }
 
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
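A note on the arch/x86/kernel/kvm.c hunks above: an async page fault can now arrive while the CPU is in an RCU extended quiescent state (user eqs), so kvm_async_pf_task_wait() brackets itself with rcu_irq_enter()/rcu_irq_exit(), and additionally steps out of RCU's view across native_safe_halt() so a halted CPU does not hold up grace periods. Below is a stripped-down sketch of just that bracketing idiom, assuming 3.8-era RCU APIs; demo_wait_for_page() and page_ready() are hypothetical stand-ins for the real wait-queue logic.

/* Sketch: RCU eqs bracketing around a halt-and-wait loop (3.8-era APIs). */
#include <linux/rcupdate.h>	/* rcu_irq_enter(), rcu_irq_exit() */
#include <linux/irqflags.h>	/* local_irq_disable() */
#include <asm/irqflags.h>	/* native_safe_halt() on x86 */

static bool page_ready(void)
{
	/* Placeholder: the real code checks a kvm_task_sleep_node. */
	return true;
}

/* Called with interrupts disabled, possibly from user eqs. */
static void demo_wait_for_page(void)
{
	rcu_irq_enter();	/* make this CPU visible to RCU again */

	while (!page_ready()) {
		/*
		 * Leave RCU's view before halting: a CPU that looks
		 * active to RCU but is halted would stall grace periods.
		 */
		rcu_irq_exit();
		native_safe_halt();	/* sti; hlt - woken by the "page ready" interrupt */
		rcu_irq_enter();
		local_irq_disable();
	}

	rcu_irq_exit();
}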