author     Julien Thierry <julien.thierry@arm.com>        2019-01-07 16:06:17 +0100
committer  Christoffer Dall <christoffer.dall@arm.com>    2019-01-24 15:09:09 +0100
commit     e08d8d296079e8fd7eefd53f73dcafebd3a5bf9f
tree       f5c8dd9720124448df761c27d636a71d8dd0ca02 /virt
parent     KVM: arm/arm64: vgic: Make vgic_dist->lpi_list_lock a raw_spinlock
KVM: arm/arm64: vgic: Make vgic_cpu->ap_list_lock a raw_spinlock
vgic_cpu->ap_list_lock must always be taken with interrupts disabled as
it is used in interrupt context.
For configurations such as PREEMPT_RT_FULL, this means that it should
be a raw_spinlock since RT spinlocks are interruptible.
Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
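The rule the commit message applies is the usual PREEMPT_RT one: on an RT kernel a plain spinlock_t is no longer a busy-wait lock, so a lock that is taken from hard interrupt context, or with interrupts disabled, has to be a raw_spinlock_t. The sketch below is only an illustration of that pattern, not code from the patch; my_dev, my_irq_handler and the other names are invented.

    /* Minimal sketch of the raw_spinlock pattern; hypothetical names. */
    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    struct my_dev {
            raw_spinlock_t lock;    /* taken in hard IRQ context -> must be raw */
            int pending;
    };

    static void my_dev_init(struct my_dev *dev)
    {
            raw_spin_lock_init(&dev->lock);
            dev->pending = 0;
    }

    static irqreturn_t my_irq_handler(int irq, void *data)
    {
            struct my_dev *dev = data;

            /* Hard IRQ context: a raw_spinlock stays a busy-wait lock on RT. */
            raw_spin_lock(&dev->lock);
            dev->pending = 1;
            raw_spin_unlock(&dev->lock);

            return IRQ_HANDLED;
    }

    static void my_task_context_path(struct my_dev *dev)
    {
            unsigned long flags;

            /* Task context racing with the handler: disable IRQs around the lock. */
            raw_spin_lock_irqsave(&dev->lock, flags);
            dev->pending = 0;
            raw_spin_unlock_irqrestore(&dev->lock, flags);
    }

This is exactly the situation of ap_list_lock: it is taken from the timer interrupt path and therefore always with interrupts disabled, so on RT it cannot be an rtmutex-based spinlock_t.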
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c |  2
-rw-r--r--  virt/kvm/arm/vgic/vgic.c      | 37
2 files changed, 20 insertions, 19 deletions
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 330c1ada7326..dfbfcb1fe933 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -206,7 +206,7 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
 	vgic_cpu->sgi_iodev.base_addr = VGIC_ADDR_UNDEF;
 
 	INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
-	spin_lock_init(&vgic_cpu->ap_list_lock);
+	raw_spin_lock_init(&vgic_cpu->ap_list_lock);
 
 	/*
 	 * Enable and configure all SGIs to be edge-triggered and
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index ea54a1923c4f..abd9c7352677 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -54,11 +54,11 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = {
  * When taking more than one ap_list_lock at the same time, always take the
  * lowest numbered VCPU's ap_list_lock first, so:
  *   vcpuX->vcpu_id < vcpuY->vcpu_id:
- *     spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
- *     spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
+ *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
  *
  * Since the VGIC must support injecting virtual interrupts from ISRs, we have
- * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer
+ * to use the raw_spin_lock_irqsave/raw_spin_unlock_irqrestore versions of outer
  * spinlocks for any lock that may be taken while injecting an interrupt.
  */
 
@@ -351,7 +351,7 @@ retry:
 
 	/* someone can do stuff here, which we re-check below */
 
-	spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 	raw_spin_lock(&irq->irq_lock);
 
 	/*
@@ -368,7 +368,8 @@ retry:
 
 	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
+					   flags);
 
 		raw_spin_lock_irqsave(&irq->irq_lock, flags);
 		goto retry;
@@ -383,7 +384,7 @@ retry:
 	irq->vcpu = vcpu;
 
 	raw_spin_unlock(&irq->irq_lock);
-	spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
+	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
 
 	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 	kvm_vcpu_kick(vcpu);
@@ -597,7 +598,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
 retry:
-	spin_lock(&vgic_cpu->ap_list_lock);
+	raw_spin_lock(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
 		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
@@ -638,7 +639,7 @@ retry:
 		/* This interrupt looks like it has to be migrated. */
 
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock(&vgic_cpu->ap_list_lock);
+		raw_spin_unlock(&vgic_cpu->ap_list_lock);
 
 		/*
 		 * Ensure locking order by always locking the smallest
@@ -652,9 +653,9 @@ retry:
 			vcpuB = vcpu;
 		}
 
-		spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
-		spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
-				 SINGLE_DEPTH_NESTING);
+		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
+				      SINGLE_DEPTH_NESTING);
 		raw_spin_lock(&irq->irq_lock);
 
 		/*
@@ -676,8 +677,8 @@ retry:
 		}
 
 		raw_spin_unlock(&irq->irq_lock);
-		spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
-		spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
+		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
 
 		if (target_vcpu_needs_kick) {
 			kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
@@ -687,7 +688,7 @@ retry:
 		goto retry;
 	}
 
-	spin_unlock(&vgic_cpu->ap_list_lock);
+	raw_spin_unlock(&vgic_cpu->ap_list_lock);
 }
 
 static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
@@ -872,9 +873,9 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
 
-	spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 	vgic_flush_lr_state(vcpu);
-	spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
+	raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
 
 	if (can_access_vgic_from_kernel())
 		vgic_restore_state(vcpu);
@@ -918,7 +919,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 
 	vgic_get_vmcr(vcpu, &vmcr);
 
-	spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		raw_spin_lock(&irq->irq_lock);
@@ -931,7 +932,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
 			break;
 	}
 
-	spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
+	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
 
 	return pending;
 }
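For the two-VCPU migration case in vgic_prune_ap_list(), the comment in vgic.c spells out the ordering rule: always take the lower-numbered VCPU's ap_list_lock first, and annotate the second acquisition with SINGLE_DEPTH_NESTING so lockdep accepts holding two locks of the same class. The helpers below are a hypothetical distillation of that convention, not code added by the patch; they assume the kvm/arm vgic internal headers.

    /* Hypothetical illustration only; mirrors the ordering rule documented above. */
    static void ap_list_lock_both(struct kvm_vcpu *x, struct kvm_vcpu *y)
    {
            /* Lock ordering: lower vcpu_id first, second lock marked as nested. */
            struct kvm_vcpu *first  = x->vcpu_id < y->vcpu_id ? x : y;
            struct kvm_vcpu *second = x->vcpu_id < y->vcpu_id ? y : x;

            raw_spin_lock(&first->arch.vgic_cpu.ap_list_lock);
            raw_spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
    }

    static void ap_list_unlock_both(struct kvm_vcpu *x, struct kvm_vcpu *y)
    {
            /* Release in reverse order of acquisition. */
            struct kvm_vcpu *first  = x->vcpu_id < y->vcpu_id ? x : y;
            struct kvm_vcpu *second = x->vcpu_id < y->vcpu_id ? y : x;

            raw_spin_unlock(&second->arch.vgic_cpu.ap_list_lock);
            raw_spin_unlock(&first->arch.vgic_cpu.ap_list_lock);
    }

The patch keeps this ordering unchanged; it only switches the primitives from spin_lock*/spin_unlock* to their raw_ counterparts.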