Diffstat (limited to 'drivers/xen/events.c')
-rw-r--r--  drivers/xen/events.c  |  114
1 file changed, 112 insertions(+), 2 deletions(-)
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 76e5b7386af9..332dd63750a0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -355,7 +355,7 @@ static void unbind_from_irq(unsigned int irq)
spin_lock(&irq_mapping_update_lock);
- if (VALID_EVTCHN(evtchn) && (--irq_bindcount[irq] == 0)) {
+ if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
close.port = evtchn;
if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
BUG();
@@ -375,7 +375,7 @@ static void unbind_from_irq(unsigned int irq)
evtchn_to_irq[evtchn] = -1;
irq_info[irq] = IRQ_UNBOUND;
- dynamic_irq_init(irq);
+ dynamic_irq_cleanup(irq);
}
spin_unlock(&irq_mapping_update_lock);
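The reordered test in the hunk above matters because && short-circuits: with the old operand order, an irq whose event channel was already invalid never had its bindcount decremented, so the binding could never be released. Decrementing first guarantees the count always drops, while the EVTCHNOP_close hypercall stays gated on a valid channel. The same hunk also swaps dynamic_irq_init() for dynamic_irq_cleanup(), the teardown counterpart, once the irq becomes unbound. A minimal standalone sketch of the short-circuit difference (names hypothetical, not from events.c):

    #include <stdio.h>
    #include <stdbool.h>

    static int bindcount = 1;
    static bool valid_evtchn = false;   /* channel already torn down */

    int main(void)
    {
            /* Old order: valid_evtchn is false, so && never evaluates
             * the decrement and bindcount stays pinned at 1. */
            if (valid_evtchn && (--bindcount == 0))
                    puts("close channel");
            printf("old order leaves bindcount = %d\n", bindcount);

            /* New order: the decrement always executes; closing the
             * channel is still gated on a valid evtchn. */
            if ((--bindcount == 0) && valid_evtchn)
                    puts("close channel");
            printf("new order leaves bindcount = %d\n", bindcount);
            return 0;
    }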
@@ -557,6 +557,33 @@ out:
put_cpu();
}
+/* Rebind a new event channel to an existing irq. */
+void rebind_evtchn_irq(int evtchn, int irq)
+{
+ /* Make sure the irq is masked, since the new event channel
+ will also be masked. */
+ disable_irq(irq);
+
+ spin_lock(&irq_mapping_update_lock);
+
+ /* After resume the irq<->evtchn mappings are all cleared out */
+ BUG_ON(evtchn_to_irq[evtchn] != -1);
+ /* Expect irq to have been bound before,
+ so the bindcount should be non-0 */
+ BUG_ON(irq_bindcount[irq] == 0);
+
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_EVTCHN, 0, evtchn);
+
+ spin_unlock(&irq_mapping_update_lock);
+
+ /* new event channels are always bound to cpu 0 */
+ irq_set_affinity(irq, cpumask_of_cpu(0));
+
+ /* Unmask the event channel. */
+ enable_irq(irq);
+}
+
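rebind_evtchn_irq() above lets a driver keep its Linux irq number across a save/restore cycle while attaching it to the fresh event channel the hypervisor hands back on resume. A hedged usage sketch (the driver, its resume hook, and the evtchn source are hypothetical, not part of this patch):

    /* Hypothetical frontend resume path: the irq was bound before the
     * save, but Xen allocated a new event channel on restore. */
    static int mydev_irq;

    static void mydev_resume(int new_evtchn)
    {
            /* Re-attach the existing irq to the new channel; per the
             * code above it comes back bound to CPU 0 and unmasked. */
            rebind_evtchn_irq(new_evtchn, mydev_irq);
    }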
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static void rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
@@ -647,6 +674,89 @@ static int retrigger_dynirq(unsigned int irq)
return ret;
}
+static void restore_cpu_virqs(unsigned int cpu)
+{
+ struct evtchn_bind_virq bind_virq;
+ int virq, irq, evtchn;
+
+ for (virq = 0; virq < NR_VIRQS; virq++) {
+ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
+ continue;
+
+ BUG_ON(irq_info[irq].type != IRQT_VIRQ);
+ BUG_ON(irq_info[irq].index != virq);
+
+ /* Get a new binding from Xen. */
+ bind_virq.virq = virq;
+ bind_virq.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
+ &bind_virq) != 0)
+ BUG();
+ evtchn = bind_virq.port;
+
+ /* Record the new mapping. */
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
+ bind_evtchn_to_cpu(evtchn, cpu);
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+ }
+}
+
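restore_cpu_virqs() walks the per-cpu virq_to_irq table and, for each virq that was bound before the suspend, asks Xen for a fresh EVTCHNOP_bind_virq binding and records it. The BUG_ON() checks consult the per-irq bookkeeping; judging only from the accessors used in this diff (.type and .index here, plus the .evtchn field zapped in xen_irq_resume() below), that record looks roughly like the following sketch (field names taken from the accessors, widths and order assumed):

    /* Assumed shape of the per-irq record built by mk_irq_info();
     * only the fields this diff actually touches are shown. */
    struct irq_info {
            unsigned int   type;    /* IRQT_VIRQ, IRQT_IPI, IRQT_EVTCHN, ... */
            unsigned short index;   /* type-specific: virq or ipi number */
            unsigned short evtchn;  /* bound event channel, 0 if unbound */
    };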
+static void restore_cpu_ipis(unsigned int cpu)
+{
+ struct evtchn_bind_ipi bind_ipi;
+ int ipi, irq, evtchn;
+
+ for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
+ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
+ continue;
+
+ BUG_ON(irq_info[irq].type != IRQT_IPI);
+ BUG_ON(irq_info[irq].index != ipi);
+
+ /* Get a new binding from Xen. */
+ bind_ipi.vcpu = cpu;
+ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
+ &bind_ipi) != 0)
+ BUG();
+ evtchn = bind_ipi.port;
+
+ /* Record the new mapping. */
+ evtchn_to_irq[evtchn] = irq;
+ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
+ bind_evtchn_to_cpu(evtchn, cpu);
+
+ /* Ready for use. */
+ unmask_evtchn(evtchn);
+ }
+}
+
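restore_cpu_ipis() mirrors restore_cpu_virqs() exactly, differing only in the bind hypercall (EVTCHNOP_bind_ipi) and the per-cpu table it walks; both loops end with the same bookkeeping tail. A sketch of how that shared tail could be factored out (helper name hypothetical; the patch keeps the two loops separate):

    /* Hypothetical helper mirroring the tail of both restore loops:
     * record a freshly bound channel against an existing irq and make
     * it deliverable again. */
    static void restore_evtchn_mapping(int irq, int evtchn, unsigned int cpu,
                                       unsigned int type, unsigned int index)
    {
            evtchn_to_irq[evtchn] = irq;
            irq_info[irq] = mk_irq_info(type, index, evtchn);
            bind_evtchn_to_cpu(evtchn, cpu);
            unmask_evtchn(evtchn);          /* ready for use */
    }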
+void xen_irq_resume(void)
+{
+ unsigned int cpu, irq, evtchn;
+
+ init_evtchn_cpu_bindings();
+
+ /* New event-channel space is not 'live' yet. */
+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+ mask_evtchn(evtchn);
+
+ /* No IRQ <-> event-channel mappings. */
+ for (irq = 0; irq < NR_IRQS; irq++)
+ irq_info[irq].evtchn = 0; /* zap event-channel binding */
+
+ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
+ evtchn_to_irq[evtchn] = -1;
+
+ for_each_possible_cpu(cpu) {
+ restore_cpu_virqs(cpu);
+ restore_cpu_ipis(cpu);
+ }
+}
+
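xen_irq_resume() is the entry point the wider save/restore machinery calls once the domain is running again: it masks every event channel, clears all stale irq<->evtchn mappings, and re-establishes only the kernel-owned per-cpu VIRQ and IPI bindings; device channels are left for their drivers to rebind (e.g. via rebind_evtchn_irq() above). A hedged sketch of where it slots into a resume sequence (the caller below is hypothetical, not part of this hunk):

    /* Hypothetical post-suspend sequence: bring the event-channel layer
     * back before individual frontends reconnect. */
    static void xen_resume_sketch(void)
    {
            xen_irq_resume();       /* rebind VIRQs/IPIs, mask the rest */
            /* ... frontend drivers then rebind their own channels ... */
    }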
static struct irq_chip xen_dynamic_chip __read_mostly = {
.name = "xen-dyn",
.mask = disable_dynirq,