author		Anton Blanchard <anton@samba.org>	2010-01-31 21:34:06 +0100
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2010-02-17 04:02:49 +0100
commit		89713ed10815401a1bfe12e3a076b64048381b56 (patch)
tree		2d9ce51782997e3ed265dbee7c72c3e7b0c776e3 /arch
parent		powerpc: Remove whitespace in irq chip name fields (diff)
download	linux-89713ed10815401a1bfe12e3a076b64048381b56.tar.xz
		linux-89713ed10815401a1bfe12e3a076b64048381b56.zip
powerpc: Add timer, performance monitor and machine check counts to /proc/interrupts
With NO_HZ it is useful to know how often the decrementer is going off. The patch below adds an entry for it and also adds it into the /proc/stat summaries.

While here, I added performance monitoring and machine check exceptions. I found it useful to keep an eye on the PMU exception rate when using the perf tool. Since it's possible to take a completely handled machine check on a System p box it also sounds like a good idea to keep a machine check summary.

The event naming matches x86 to keep gratuitous differences to a minimum.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
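The patch exposes the new counters as LOC, CNT and MCE rows in /proc/interrupts and folds them into the /proc/stat totals via arch_irq_stat_cpu(). As a rough illustration only (this userspace reader is not part of the patch, and the parsing is just a sketch), the rows it adds could be summed across CPUs like this:

/*
 * Illustrative userspace sketch, not part of the patch: sum the per-CPU
 * counts on the LOC, CNT and MCE rows that this patch adds to
 * /proc/interrupts. The row labels come from the patch; the parsing
 * details are assumptions about the usual /proc/interrupts layout.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	FILE *fp = fopen("/proc/interrupts", "r");
	char line[4096];

	if (!fp) {
		perror("/proc/interrupts");
		return 1;
	}

	while (fgets(line, sizeof(line), fp)) {
		char label[16];
		unsigned long long sum = 0;
		char *p;
		int n = 0;

		/* Each row starts with a right-justified label and a colon. */
		if (sscanf(line, " %15[^: \n]:%n", label, &n) != 1 || n == 0)
			continue;
		if (strcmp(label, "LOC") && strcmp(label, "CNT") &&
		    strcmp(label, "MCE"))
			continue;

		/* Sum the per-CPU columns that follow the label. */
		p = line + n;
		while (*p) {
			char *end;
			unsigned long long v = strtoull(p, &end, 10);

			if (end == p)
				break;
			sum += v;
			p = end;
		}
		printf("%s total: %llu\n", label, sum);
	}
	fclose(fp);
	return 0;
}

Built with any C compiler and run on a kernel carrying this patch, each matching row prints its per-CPU counts collapsed into a single total, which is handy for watching the decrementer or PMU exception rate over time.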
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/hardirq.h	 9
-rw-r--r--	arch/powerpc/kernel/irq.c		35
-rw-r--r--	arch/powerpc/kernel/time.c		 2
-rw-r--r--	arch/powerpc/kernel/traps.c		 4
4 files changed, 50 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 9bf3467581b1..cd2d4be882aa 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -6,6 +6,9 @@
 
 typedef struct {
 	unsigned int __softirq_pending;
+	unsigned int timer_irqs;
+	unsigned int pmu_irqs;
+	unsigned int mce_exceptions;
 } ____cacheline_aligned irq_cpustat_t;
 
 DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
@@ -19,4 +22,10 @@ static inline void ack_bad_irq(unsigned int irq)
 	printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
 }
 
+extern u64 arch_irq_stat_cpu(unsigned int cpu);
+#define arch_irq_stat_cpu arch_irq_stat_cpu
+
+extern u64 arch_irq_stat(void);
+#define arch_irq_stat arch_irq_stat
+
 #endif /* _ASM_POWERPC_HARDIRQ_H */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index b9cbb4570048..710505240f2f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -196,6 +196,21 @@ static int show_other_interrupts(struct seq_file *p, int prec)
 	}
 #endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
 
+	seq_printf(p, "%*s: ", prec, "LOC");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
+	seq_printf(p, " Local timer interrupts\n");
+
+	seq_printf(p, "%*s: ", prec, "CNT");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
+	seq_printf(p, " Performance monitoring interrupts\n");
+
+	seq_printf(p, "%*s: ", prec, "MCE");
+	for_each_online_cpu(j)
+		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
+	seq_printf(p, " Machine check exceptions\n");
+
 	seq_printf(p, "%*s: %10u\n", prec, "BAD", ppc_spurious_interrupts);
 
 	return 0;
@@ -258,6 +273,26 @@ out:
 	return 0;
 }
 
+/*
+ * /proc/stat helpers
+ */
+u64 arch_irq_stat_cpu(unsigned int cpu)
+{
+	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+
+	sum += per_cpu(irq_stat, cpu).pmu_irqs;
+	sum += per_cpu(irq_stat, cpu).mce_exceptions;
+
+	return sum;
+}
+
+u64 arch_irq_stat(void)
+{
+	u64 sum = ppc_spurious_interrupts;
+
+	return sum;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(cpumask_t map)
 {
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index ed1c0f58344a..1b16b9a3e49a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -575,6 +575,8 @@ void timer_interrupt(struct pt_regs * regs)
 
 	trace_timer_interrupt_entry(regs);
 
+	__get_cpu_var(irq_stat).timer_irqs++;
+
 	/* Ensure a positive value is written to the decrementer, or else
 	 * some CPUs will continuue to take decrementer exceptions */
 	set_dec(DECREMENTER_MAX);
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0a320dbd950a..895da29e7db8 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -483,6 +483,8 @@ void machine_check_exception(struct pt_regs *regs)
 {
 	int recover = 0;
 
+	__get_cpu_var(irq_stat).mce_exceptions++;
+
 	/* See if any machine dependent calls. In theory, we would want
 	 * to call the CPU first, and call the ppc_md. one if the CPU
 	 * one returns a positive number. However there is existing code
@@ -965,6 +967,8 @@ void vsx_unavailable_exception(struct pt_regs *regs)
 
 void performance_monitor_exception(struct pt_regs *regs)
 {
+	__get_cpu_var(irq_stat).pmu_irqs++;
+
 	perf_irq(regs);
 }