author      Jeremy Fitzhardinge <jeremy@goop.org>    2008-03-18 00:37:18 +0100
committer   Ingo Molnar <mingo@elte.hu>              2008-04-24 23:57:32 +0200
commit      ee523ca1e456d754d66be6deab910131e4e1dbf8 (patch)
tree        346c3bf4f701a07b124af72a3da4577b2e5dcb3b /arch/x86/xen
parent      xen: support sysenter/sysexit if hypervisor does (diff)
xen: implement a debug-interrupt handler
Xen supports the notion of a debug interrupt which can be triggered
from the console. For now this is implemented to show pending events,
masks and each CPU's pending event set.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/events.c  | 47
-rw-r--r--  arch/x86/xen/smp.c     | 19
-rw-r--r--  arch/x86/xen/xen-ops.h |  3
3 files changed, 64 insertions(+), 5 deletions(-)
diff --git a/arch/x86/xen/events.c b/arch/x86/xen/events.c
index dcf613e17581..0140981e93c4 100644
--- a/arch/x86/xen/events.c
+++ b/arch/x86/xen/events.c
@@ -455,6 +455,53 @@ void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
 	notify_remote_via_irq(irq);
 }
 
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
+{
+	struct shared_info *sh = HYPERVISOR_shared_info;
+	int cpu = smp_processor_id();
+	int i;
+	unsigned long flags;
+	static DEFINE_SPINLOCK(debug_lock);
+
+	spin_lock_irqsave(&debug_lock, flags);
+
+	printk("vcpu %d\n  ", cpu);
+
+	for_each_online_cpu(i) {
+		struct vcpu_info *v = per_cpu(xen_vcpu, i);
+		printk("%d: masked=%d pending=%d event_sel %08lx\n  ", i,
+		       (get_irq_regs() && i == cpu) ? !(get_irq_regs()->flags & X86_EFLAGS_IF) : v->evtchn_upcall_mask,
+		       v->evtchn_upcall_pending,
+		       v->evtchn_pending_sel);
+	}
+	printk("pending:\n   ");
+	for(i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
+		printk("%08lx%s", sh->evtchn_pending[i],
+		       i % 8 == 0 ? "\n   " : " ");
+	printk("\nmasks:\n   ");
+	for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+		printk("%08lx%s", sh->evtchn_mask[i],
+		       i % 8 == 0 ? "\n   " : " ");
+
+	printk("\nunmasked:\n   ");
+	for(i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
+		printk("%08lx%s", sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
+		       i % 8 == 0 ? "\n   " : " ");
+
+	printk("\npending list:\n");
+	for(i = 0; i < NR_EVENT_CHANNELS; i++) {
+		if (sync_test_bit(i, sh->evtchn_pending)) {
+			printk("  %d: event %d -> irq %d\n",
+			       cpu_evtchn[i], i,
+			       evtchn_to_irq[i]);
+		}
+	}
+
+	spin_unlock_irqrestore(&debug_lock, flags);
+
+	return IRQ_HANDLED;
+}
+
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index d61e4f8b07c7..92dd3dbf3ffb 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -36,8 +36,9 @@
 #include "mmu.h"
 
 static cpumask_t xen_cpu_initialized_map;
-static DEFINE_PER_CPU(int, resched_irq);
-static DEFINE_PER_CPU(int, callfunc_irq);
+static DEFINE_PER_CPU(int, resched_irq) = -1;
+static DEFINE_PER_CPU(int, callfunc_irq) = -1;
+static DEFINE_PER_CPU(int, debug_irq) = -1;
 
 /*
  * Structure and data for smp_call_function().  This is designed to minimise
@@ -89,9 +90,7 @@ static __cpuinit void cpu_bringup_and_idle(void)
 static int xen_smp_intr_init(unsigned int cpu)
 {
 	int rc;
-	const char *resched_name, *callfunc_name;
-
-	per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
+	const char *resched_name, *callfunc_name, *debug_name;
 
 	resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
 	rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
@@ -115,6 +114,14 @@ static int xen_smp_intr_init(unsigned int cpu)
 		goto fail;
 	per_cpu(callfunc_irq, cpu) = rc;
 
+	debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
+	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
+				     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
+				     debug_name, NULL);
+	if (rc < 0)
+		goto fail;
+	per_cpu(debug_irq, cpu) = rc;
+
 	return 0;
 
  fail:
@@ -122,6 +129,8 @@ static int xen_smp_intr_init(unsigned int cpu)
 		unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
 	if (per_cpu(callfunc_irq, cpu) >= 0)
 		unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
+	if (per_cpu(debug_irq, cpu) >= 0)
+		unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
 	return rc;
 }
 
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 01d4ff2ce404..22395d20dd6e 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -2,6 +2,7 @@
 #define XEN_OPS_H
 
 #include <linux/init.h>
+#include <linux/irqreturn.h>
 
 /* These are code, but not functions.  Defined in entry.S */
 extern const char xen_hypervisor_callback[];
@@ -29,6 +30,8 @@ unsigned long xen_get_wallclock(void);
 int xen_set_wallclock(unsigned long time);
 unsigned long long xen_sched_clock(void);
 
+irqreturn_t xen_debug_interrupt(int irq, void *dev_id);
+
 bool xen_vcpu_stolen(int vcpu);
 
 void xen_mark_init_mm_pinned(void);
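For readers less familiar with the Xen event-channel API, the sketch below condenses the bind/teardown pattern the smp.c hunk adds: a per-CPU VIRQ_DEBUG handler bound with bind_virq_to_irqhandler(), the returned Linux irq number cached per CPU so it can be released with unbind_from_irqhandler() on the failure path. This is only a minimal illustration, not the patch itself; the helper names my_debug_interrupt, xen_setup_debug_virq and xen_teardown_debug_virq are made up for the example, and the real wiring is xen_debug_interrupt()/xen_smp_intr_init() in the diff above.

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <xen/events.h>
#include <xen/interface/xen.h>

/* -1 marks "no irq bound yet", mirroring the patch's per-CPU bookkeeping. */
static DEFINE_PER_CPU(int, example_debug_irq) = -1;

/* Illustrative handler: just note which vcpu took the debug interrupt. */
static irqreturn_t my_debug_interrupt(int irq, void *dev_id)
{
	printk(KERN_DEBUG "debug virq on cpu %d\n", smp_processor_id());
	return IRQ_HANDLED;
}

/* Illustrative name; the patch does this inside xen_smp_intr_init(). */
static int xen_setup_debug_virq(unsigned int cpu)
{
	int rc;

	/*
	 * bind_virq_to_irqhandler() returns the Linux irq number on success
	 * or a negative errno.  The original patch also passes IRQF_DISABLED,
	 * which later kernels removed.
	 */
	rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, my_debug_interrupt,
				     IRQF_PERCPU | IRQF_NOBALANCING,
				     "debug", NULL);
	if (rc < 0)
		return rc;

	per_cpu(example_debug_irq, cpu) = rc;
	return 0;
}

/* Illustrative teardown, matching the patch's failure path. */
static void xen_teardown_debug_virq(unsigned int cpu)
{
	if (per_cpu(example_debug_irq, cpu) >= 0) {
		unbind_from_irqhandler(per_cpu(example_debug_irq, cpu), NULL);
		per_cpu(example_debug_irq, cpu) = -1;
	}
}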