author     Paolo Bonzini <pbonzini@redhat.com>  2014-12-15 13:06:40 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2014-12-15 13:06:40 +0100
commit     333bce5aac9e8cb7f6b27e0122a224d17be4dd5d (patch)
tree       7a171a51d9a999f90dd6f17f2b7dd0997dbc1a38 /virt/kvm
parent     KVM: x86: em_ret_far overrides cpl (diff)
parent     arm/arm64: KVM: Require in-kernel vgic for the arch timers (diff)
Merge tag 'kvm-arm-for-3.19-take2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
Second round of changes for KVM for arm/arm64 for v3.19; fixes reboot
problems, clarifies VCPU init, and fixes a regression concerning the
VGIC init flow.
Conflicts:
arch/ia64/kvm/kvm-ia64.c [deleted in HEAD and modified in kvmarm]
Diffstat (limited to 'virt/kvm')

 -rw-r--r--  virt/kvm/arm/arch_timer.c |  30
 -rw-r--r--  virt/kvm/arm/vgic.c       | 116
 -rw-r--r--  virt/kvm/kvm_main.c       |  16

3 files changed, 90 insertions, 72 deletions
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 22fa819a9b6a..1c0772b340d8 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -61,12 +61,14 @@ static void timer_disarm(struct arch_timer_cpu *timer)
 
 static void kvm_timer_inject_irq(struct kvm_vcpu *vcpu)
 {
+	int ret;
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 
 	timer->cntv_ctl |= ARCH_TIMER_CTRL_IT_MASK;
-	kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
-			    timer->irq->irq,
-			    timer->irq->level);
+	ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+				  timer->irq->irq,
+				  timer->irq->level);
+	WARN_ON(ret);
 }
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -307,12 +309,24 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 	timer_disarm(timer);
 }
 
-int kvm_timer_init(struct kvm *kvm)
+void kvm_timer_enable(struct kvm *kvm)
 {
-	if (timecounter && wqueue) {
-		kvm->arch.timer.cntvoff = kvm_phys_timer_read();
+	if (kvm->arch.timer.enabled)
+		return;
+
+	/*
+	 * There is a potential race here between VCPUs starting for the first
+	 * time, which may be enabling the timer multiple times.  That doesn't
+	 * hurt though, because we're just setting a variable to the same
+	 * variable that it already was.  The important thing is that all
+	 * VCPUs have the enabled variable set, before entering the guest, if
+	 * the arch timers are enabled.
+	 */
+	if (timecounter && wqueue)
 		kvm->arch.timer.enabled = 1;
-	}
+}
 
-	return 0;
+void kvm_timer_init(struct kvm *kvm)
+{
+	kvm->arch.timer.cntvoff = kvm_phys_timer_read();
 }
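The comment added to kvm_timer_enable() above describes a deliberately benign race: several VCPUs doing their first run may all set kvm->arch.timer.enabled, but every writer stores the same value, so the outcome does not depend on the interleaving. A minimal userspace sketch of that idempotent-enable pattern (hypothetical names, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-in for kvm->arch.timer.enabled: several threads
 * (the "VCPUs") may race through timer_enable(), but every writer
 * stores the same value, so the final state does not depend on the
 * interleaving and no lock is needed. */
static int timer_enabled;

static void timer_enable(void)
{
	if (timer_enabled)
		return;		/* already on: nothing to do */
	timer_enabled = 1;	/* double-setting is harmless by design */
}

static void *vcpu_first_run(void *arg)
{
	(void)arg;
	timer_enable();		/* every VCPU enables on its first run */
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, vcpu_first_run, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	printf("enabled = %d\n", timer_enabled);	/* always 1 */
	return 0;
}

Strictly portable userspace C would make timer_enabled atomic; the kernel version tolerates the plain store because every racer writes the same value before any VCPU enters the guest.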
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3aaca49de325..e373b76c5420 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -91,6 +91,7 @@
 #define ACCESS_WRITE_VALUE	(3 << 1)
 #define ACCESS_WRITE_MASK(x)	((x) & (3 << 1))
 
+static int vgic_init(struct kvm *kvm);
 static void vgic_retire_disabled_irqs(struct kvm_vcpu *vcpu);
 static void vgic_retire_lr(int lr_nr, int irq, struct kvm_vcpu *vcpu);
 static void vgic_update_state(struct kvm *kvm);
@@ -1607,7 +1608,7 @@ static int vgic_validate_injection(struct kvm_vcpu *vcpu, int irq, int level)
 	}
 }
 
-static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
+static int vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 				    unsigned int irq_num, bool level)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
@@ -1643,9 +1644,10 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 			vgic_dist_irq_clear_level(vcpu, irq_num);
 			if (!vgic_dist_irq_soft_pend(vcpu, irq_num))
 				vgic_dist_irq_clear_pending(vcpu, irq_num);
-		} else {
-			vgic_dist_irq_clear_pending(vcpu, irq_num);
 		}
+
+		ret = false;
+		goto out;
 	}
 
 	enabled = vgic_irq_is_enabled(vcpu, irq_num);
@@ -1672,7 +1674,7 @@ static bool vgic_update_irq_pending(struct kvm *kvm, int cpuid,
 out:
 	spin_unlock(&dist->lock);
 
-	return ret;
+	return ret ? cpuid : -EINVAL;
 }
 
 /**
@@ -1692,11 +1694,26 @@ out:
 int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 			bool level)
 {
-	if (likely(vgic_initialized(kvm)) &&
-	    vgic_update_irq_pending(kvm, cpuid, irq_num, level))
-		vgic_kick_vcpus(kvm);
+	int ret = 0;
+	int vcpu_id;
 
-	return 0;
+	if (unlikely(!vgic_initialized(kvm))) {
+		mutex_lock(&kvm->lock);
+		ret = vgic_init(kvm);
+		mutex_unlock(&kvm->lock);
+
+		if (ret)
+			goto out;
+	}
+
+	vcpu_id = vgic_update_irq_pending(kvm, cpuid, irq_num, level);
+	if (vcpu_id >= 0) {
+		/* kick the specified vcpu */
+		kvm_vcpu_kick(kvm_get_vcpu(kvm, vcpu_id));
+	}
+
+out:
+	return ret;
 }
 
 static irqreturn_t vgic_maintenance_handler(int irq, void *data)
@@ -1726,39 +1743,14 @@ static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
 	int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
 
 	vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
-	vgic_cpu->vgic_irq_lr_map = kzalloc(nr_irqs, GFP_KERNEL);
+	vgic_cpu->vgic_irq_lr_map = kmalloc(nr_irqs, GFP_KERNEL);
 
 	if (!vgic_cpu->pending_shared || !vgic_cpu->vgic_irq_lr_map) {
 		kvm_vgic_vcpu_destroy(vcpu);
 		return -ENOMEM;
 	}
 
-	return 0;
-}
-
-/**
- * kvm_vgic_vcpu_init - Initialize per-vcpu VGIC state
- * @vcpu: pointer to the vcpu struct
- *
- * Initialize the vgic_cpu struct and vgic_dist struct fields pertaining to
- * this vcpu and enable the VGIC for this VCPU
- */
-static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
-{
-	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	int i;
-
-	for (i = 0; i < dist->nr_irqs; i++) {
-		if (i < VGIC_NR_PPIS)
-			vgic_bitmap_set_irq_val(&dist->irq_enabled,
-						vcpu->vcpu_id, i, 1);
-		if (i < VGIC_NR_PRIVATE_IRQS)
-			vgic_bitmap_set_irq_val(&dist->irq_cfg,
-						vcpu->vcpu_id, i, VGIC_CFG_EDGE);
-
-		vgic_cpu->vgic_irq_lr_map[i] = LR_EMPTY;
-	}
+	memset(vgic_cpu->vgic_irq_lr_map, LR_EMPTY, nr_irqs);
 
 	/*
 	 * Store the number of LRs per vcpu, so we don't have to go
@@ -1767,7 +1759,7 @@ static void kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	 */
 	vgic_cpu->nr_lr = vgic->nr_lr;
 
-	vgic_enable(vcpu);
+	return 0;
 }
 
 void kvm_vgic_destroy(struct kvm *kvm)
@@ -1798,20 +1790,21 @@ void kvm_vgic_destroy(struct kvm *kvm)
 	dist->irq_spi_cpu = NULL;
 	dist->irq_spi_target = NULL;
 	dist->irq_pending_on_cpu = NULL;
+	dist->nr_cpus = 0;
 }
 
 /*
  * Allocate and initialize the various data structures. Must be called
  * with kvm->lock held!
  */
-static int vgic_init_maps(struct kvm *kvm)
+static int vgic_init(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 	struct kvm_vcpu *vcpu;
 	int nr_cpus, nr_irqs;
-	int ret, i;
+	int ret, i, vcpu_id;
 
-	if (dist->nr_cpus)	/* Already allocated */
+	if (vgic_initialized(kvm))
 		return 0;
 
 	nr_cpus = dist->nr_cpus = atomic_read(&kvm->online_vcpus);
@@ -1859,16 +1852,28 @@ static int vgic_init_maps(struct kvm *kvm)
 	if (ret)
 		goto out;
 
-	kvm_for_each_vcpu(i, vcpu, kvm) {
+	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
+		vgic_set_target_reg(kvm, 0, i);
+
+	kvm_for_each_vcpu(vcpu_id, vcpu, kvm) {
 		ret = vgic_vcpu_init_maps(vcpu, nr_irqs);
 		if (ret) {
 			kvm_err("VGIC: Failed to allocate vcpu memory\n");
 			break;
 		}
-	}
 
-	for (i = VGIC_NR_PRIVATE_IRQS; i < dist->nr_irqs; i += 4)
-		vgic_set_target_reg(kvm, 0, i);
+		for (i = 0; i < dist->nr_irqs; i++) {
+			if (i < VGIC_NR_PPIS)
+				vgic_bitmap_set_irq_val(&dist->irq_enabled,
+							vcpu->vcpu_id, i, 1);
+			if (i < VGIC_NR_PRIVATE_IRQS)
+				vgic_bitmap_set_irq_val(&dist->irq_cfg,
+							vcpu->vcpu_id, i,
+							VGIC_CFG_EDGE);
+		}
+
+		vgic_enable(vcpu);
+	}
 
 out:
 	if (ret)
@@ -1878,25 +1883,23 @@ out:
 }
 
 /**
- * kvm_vgic_init - Initialize global VGIC state before running any VCPUs
+ * kvm_vgic_map_resources - Configure global VGIC state before running any VCPUs
  * @kvm: pointer to the kvm struct
  *
  * Map the virtual CPU interface into the VM before running any VCPUs.  We
  * can't do this at creation time, because user space must first set the
- * virtual CPU interface address in the guest physical address space.  Also
- * initialize the ITARGETSRn regs to 0 on the emulated distributor.
+ * virtual CPU interface address in the guest physical address space.
  */
-int kvm_vgic_init(struct kvm *kvm)
+int kvm_vgic_map_resources(struct kvm *kvm)
 {
-	struct kvm_vcpu *vcpu;
-	int ret = 0, i;
+	int ret = 0;
 
 	if (!irqchip_in_kernel(kvm))
 		return 0;
 
 	mutex_lock(&kvm->lock);
 
-	if (vgic_initialized(kvm))
+	if (vgic_ready(kvm))
 		goto out;
 
 	if (IS_VGIC_ADDR_UNDEF(kvm->arch.vgic.vgic_dist_base) ||
@@ -1906,7 +1909,11 @@ int kvm_vgic_init(struct kvm *kvm)
 		goto out;
 	}
 
-	ret = vgic_init_maps(kvm);
+	/*
+	 * Initialize the vgic if this hasn't already been done on demand by
+	 * accessing the vgic state from userspace.
+	 */
+	ret = vgic_init(kvm);
 	if (ret) {
 		kvm_err("Unable to allocate maps\n");
 		goto out;
 	}
@@ -1920,9 +1927,6 @@ int kvm_vgic_init(struct kvm *kvm)
 		goto out;
 	}
 
-	kvm_for_each_vcpu(i, vcpu, kvm)
-		kvm_vgic_vcpu_init(vcpu);
-
 	kvm->arch.vgic.ready = true;
 out:
 	if (ret)
@@ -2167,7 +2171,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 
 	mutex_lock(&dev->kvm->lock);
 
-	ret = vgic_init_maps(dev->kvm);
+	ret = vgic_init(dev->kvm);
 	if (ret)
 		goto out;
 
@@ -2289,7 +2293,7 @@ static int vgic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 
 		mutex_lock(&dev->kvm->lock);
 
-		if (vgic_initialized(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
+		if (vgic_ready(dev->kvm) || dev->kvm->arch.vgic.nr_irqs)
 			ret = -EBUSY;
 		else
 			dev->kvm->arch.vgic.nr_irqs = val;
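The reworked kvm_vgic_inject_irq() above no longer requires the VGIC to have been set up beforehand: if an injection arrives first, it takes kvm->lock and calls vgic_init(), which returns 0 immediately when initialization has already happened. A sketch of that lazy, lock-serialized initialization shape (hypothetical names, using a pthread mutex in place of the kernel's mutex API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical device state mirroring the vgic pattern: initialization
 * may be triggered lazily by the first injection or eagerly by a setup
 * path, and both funnel through one idempotent, lock-protected helper. */
struct dev {
	pthread_mutex_t lock;
	bool initialized;
};

/* Caller must hold d->lock; the re-check under the lock is what makes
 * the unlocked fast-path test in dev_inject() safe. */
static int dev_init(struct dev *d)
{
	if (d->initialized)
		return 0;	/* already done: succeed quietly */
	/* ... allocate per-CPU state here ... */
	d->initialized = true;
	return 0;
}

static int dev_inject(struct dev *d, int irq)
{
	int ret = 0;

	if (!d->initialized) {	/* lazy path, as in kvm_vgic_inject_irq() */
		pthread_mutex_lock(&d->lock);
		ret = dev_init(d);
		pthread_mutex_unlock(&d->lock);
		if (ret)
			return ret;
	}
	/* ... mark irq pending and kick only the target CPU ... */
	printf("irq %d injected\n", irq);
	return ret;
}

int main(void)
{
	struct dev d = { PTHREAD_MUTEX_INITIALIZER, false };
	return dev_inject(&d, 27);	/* first injection also initializes */
}

The same shape also explains why vgic_update_irq_pending() now returns the target vcpu_id instead of a bool: the caller can kick exactly one VCPU rather than all of them.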
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index c5c186af823b..f5283438ee05 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
-		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
@@ -1301,7 +1301,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	else if ((vma->vm_flags & VM_PFNMAP)) {
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
+		BUG_ON(!kvm_is_reserved_pfn(pfn));
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -1407,7 +1407,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (kvm_is_mmio_pfn(pfn)) {
+	if (kvm_is_reserved_pfn(pfn)) {
 		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
 	}
@@ -1436,7 +1436,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1457,7 +1457,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -1467,14 +1467,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
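The kvm_main.c change is a rename with one semantic difference: kvm_is_mmio_pfn() carved the zero page out of the reserved set, so helpers such as kvm_release_pfn_clean() would still adjust its reference count, whereas kvm_is_reserved_pfn() treats any PageReserved page, zero page included, as off-limits. A side-by-side sketch of the two predicates (stub functions stand in for the mm helpers; the pfn model here is hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the mm predicates the kernel code uses;
 * the zero page is modelled as pfn 0, which is valid and reserved. */
static bool pfn_valid_stub(unsigned long pfn)     { return pfn < 1024; }
static bool is_zero_pfn_stub(unsigned long pfn)   { return pfn == 0; }
static bool page_reserved_stub(unsigned long pfn) { return pfn == 0; }

/* Old predicate: the zero page was carved out of the reserved set, so
 * the pfn accessors would still get/put its reference count. */
static bool old_is_mmio_pfn(unsigned long pfn)
{
	if (pfn_valid_stub(pfn))
		return !is_zero_pfn_stub(pfn) && page_reserved_stub(pfn);
	return true;	/* no struct page at all: treat as MMIO */
}

/* New predicate: any reserved page, zero page included, is left alone,
 * hence the rename to kvm_is_reserved_pfn(). */
static bool new_is_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid_stub(pfn))
		return page_reserved_stub(pfn);
	return true;
}

int main(void)
{
	/* The zero page is the one case where the predicates disagree. */
	printf("zero page: old=%d new=%d\n",
	       old_is_mmio_pfn(0), new_is_reserved_pfn(0));	/* old=0 new=1 */
	return 0;
}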