author      Anoob Soman <anoob.soman@citrix.com>    2017-06-07 13:46:56 +0200
committer   Juergen Gross <jgross@suse.com>         2017-06-13 15:30:27 +0200
commit      c48f64ab472389df6f48171899c9d337adfadc5b (patch)
tree        5bd75e13eeea925c52dac81bc6c82fc8a2a72bdc /drivers/xen/evtchn.c
parent      xen: avoid type warning in xchg_xen_ulong (diff)
xen-evtchn: Bind dyn evtchn:qemu-dm interrupt to next online VCPU
An HVM domain booting generates around 200K (evtchn:qemu-dm xen-dyn) interrupts in a short period of time. All of these evtchn:qemu-dm interrupts are bound to VCPU 0 until irqbalance sees them and moves them to a different VCPU. In one configuration, irqbalance runs every 10 seconds, which means it usually doesn't get to see this burst of interrupts and doesn't rebalance them, so all evtchn:qemu-dm interrupts end up being processed by VCPU0. This causes VCPU0 to spend most of its time processing hardirqs and very little time on softirqs. Moreover, if dom0 kernel PREEMPTION is disabled, VCPU0 never runs the watchdog (process context), triggering the softlockup detection code to panic.

Binding evtchn:qemu-dm to the next online VCPU spreads hardirq processing evenly across different CPUs. Later, irqbalance will try to rebalance evtchn:qemu-dm if required.

Signed-off-by: Anoob Soman <anoob.soman@citrix.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Juergen Gross <jgross@suse.com>
Diffstat (limited to 'drivers/xen/evtchn.c')
-rw-r--r--   drivers/xen/evtchn.c   34
1 file changed, 33 insertions, 1 deletion
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 10f1ef582659..9729a64ea1a9 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -421,6 +421,36 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
 	del_evtchn(u, evtchn);
 }
 
+static DEFINE_PER_CPU(int, bind_last_selected_cpu);
+
+static void evtchn_bind_interdom_next_vcpu(int evtchn)
+{
+	unsigned int selected_cpu, irq;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	irq = irq_from_evtchn(evtchn);
+	desc = irq_to_desc(irq);
+
+	if (!desc)
+		return;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	selected_cpu = this_cpu_read(bind_last_selected_cpu);
+	selected_cpu = cpumask_next_and(selected_cpu,
+			desc->irq_common_data.affinity, cpu_online_mask);
+
+	if (unlikely(selected_cpu >= nr_cpu_ids))
+		selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
+			cpu_online_mask);
+
+	this_cpu_write(bind_last_selected_cpu, selected_cpu);
+
+	/* unmask expects irqs to be disabled */
+	xen_rebind_evtchn_to_cpu(evtchn, selected_cpu);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
 static long evtchn_ioctl(struct file *file,
 			 unsigned int cmd, unsigned long arg)
 {
@@ -478,8 +508,10 @@ static long evtchn_ioctl(struct file *file,
 			break;
 
 		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
-		if (rc == 0)
+		if (rc == 0) {
 			rc = bind_interdomain.local_port;
+			evtchn_bind_interdom_next_vcpu(rc);
+		}
 		break;
 	}
 
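
The selection logic in the patch above is a simple round-robin over the CPUs that are both in the IRQ's affinity mask and online, with a wrap back to the first eligible CPU once the end of the mask is reached. The following standalone userspace sketch illustrates that logic only; it is not kernel code. The helpers next_and()/first_and(), NR_CPUS, and the example affinity/online masks are simplified stand-ins for the kernel's cpumask_next_and()/cpumask_first_and() and per-CPU state, made up for illustration.

/* Userspace sketch of the round-robin CPU selection (assumptions noted above). */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 8

/* Stand-in for cpumask_next_and(): first CPU > prev set in both masks, or NR_CPUS. */
static int next_and(int prev, const bool *affinity, const bool *online)
{
	for (int cpu = prev + 1; cpu < NR_CPUS; cpu++)
		if (affinity[cpu] && online[cpu])
			return cpu;
	return NR_CPUS;
}

/* Stand-in for cpumask_first_and(): first CPU set in both masks, or NR_CPUS. */
static int first_and(const bool *affinity, const bool *online)
{
	return next_and(-1, affinity, online);
}

int main(void)
{
	/* Example masks: all CPUs allowed by the irq affinity, CPUs 2 and 5 offline. */
	bool affinity[NR_CPUS] = { true, true, true,  true, true, true,  true, true };
	bool online[NR_CPUS]   = { true, true, false, true, true, false, true, true };
	int last_selected = 0;	/* plays the role of bind_last_selected_cpu */

	/* Bind six hypothetical event channels one after another. */
	for (int evtchn = 0; evtchn < 6; evtchn++) {
		int cpu = next_and(last_selected, affinity, online);

		if (cpu >= NR_CPUS)	/* ran off the end: wrap, as in the patch */
			cpu = first_and(affinity, online);

		last_selected = cpu;
		printf("evtchn %d -> CPU %d\n", evtchn, cpu);
	}
	return 0;
}

With these masks the bindings come out as CPUs 1, 3, 4, 6, 7 and then wrap back to 0, skipping the offline CPUs; that is the even spread of hardirq processing the commit message describes, with irqbalance free to move the interrupts later.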