Diffstat (limited to 'arch/parisc/kernel/irq.c')
-rw-r--r--  arch/parisc/kernel/irq.c  |  198
 1 file changed, 195 insertions(+), 3 deletions(-)
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 8094d3ed3b64..2e6443b1e922 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -27,11 +27,11 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
-#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>
#include <asm/smp.h>
+#include <asm/ldcw.h>
#undef PARISC_IRQ_CR16_COUNTS
@@ -152,6 +152,53 @@ static struct irq_chip cpu_interrupt_type = {
.irq_retrigger = NULL,
};
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+#define irq_stats(x) (&per_cpu(irq_stat, x))
+
+/*
+ * /proc/interrupts printing for arch specific interrupts
+ */
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ int j;
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ seq_printf(p, "%*s: ", prec, "STK");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
+ seq_puts(p, " Kernel stack usage\n");
+# ifdef CONFIG_IRQSTACKS
+ seq_printf(p, "%*s: ", prec, "IST");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
+ seq_puts(p, " Interrupt stack usage\n");
+# endif
+#endif
+#ifdef CONFIG_SMP
+ seq_printf(p, "%*s: ", prec, "RES");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
+ seq_puts(p, " Rescheduling interrupts\n");
+ seq_printf(p, "%*s: ", prec, "CAL");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
+ seq_puts(p, " Function call interrupts\n");
+#endif
+ seq_printf(p, "%*s: ", prec, "UAH");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
+ seq_puts(p, " Unaligned access handler traps\n");
+ seq_printf(p, "%*s: ", prec, "FPA");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
+ seq_puts(p, " Floating point assist traps\n");
+ seq_printf(p, "%*s: ", prec, "TLB");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
+ seq_puts(p, " TLB shootdowns\n");
+ return 0;
+}
+
int show_interrupts(struct seq_file *p, void *v)
{
int i = *(loff_t *) v, j;
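
The per-cpu irq_stat counters printed by arch_show_interrupts() above are meant to be incremented from the corresponding arch paths (trap handlers, IPI code). As a minimal sketch, assuming the generic per-cpu accessors and a hypothetical helper name that is not part of this diff, an unaligned-access trap could be counted like this so it shows up in the "UAH" row:

#include <linux/percpu.h>
#include <asm/hardirq.h>	/* irq_cpustat_t and the irq_stat declaration */

/* Hypothetical helper, not part of this diff: count one unaligned
 * access trap on the current CPU. */
static inline void count_unaligned_trap(void)
{
	this_cpu_inc(irq_stat.irq_unaligned_count);
}
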
@@ -219,6 +266,9 @@ int show_interrupts(struct seq_file *p, void *v)
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
+ if (i == NR_IRQS)
+ arch_show_interrupts(p, 3);
+
return 0;
}
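
The hard-coded 3 passed as prec above only sets the width of the label column through the "%*s" conversion, so the three-letter tags (STK, RES, UAH, ...) line up with the numeric IRQ rows. A trivial stand-alone C illustration of that format string (not kernel code):

#include <stdio.h>

int main(void)
{
	int prec = 3;
	/* "%*s" pads the label to 'prec' characters before the colon. */
	printf("%*s: %10u  Rescheduling interrupts\n", prec, "RES", 42u);
	printf("%*s: %10u  Unaligned access handler traps\n", prec, "UAH", 7u);
	return 0;
}
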
@@ -330,6 +380,144 @@ static inline int eirr_to_irq(unsigned long eirr)
return (BITS_PER_LONG - bit) + TIMER_IRQ;
}
+#ifdef CONFIG_IRQSTACKS
+/*
+ * IRQ STACK - used for irq handler
+ */
+#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
+
+union irq_stack_union {
+ unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
+ volatile unsigned int slock[4];
+ volatile unsigned int lock[1];
+};
+
+DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
+ .slock = { 1,1,1,1 },
+ };
+#endif
+
+
+int sysctl_panic_on_stackoverflow = 1;
+
+static inline void stack_overflow_check(struct pt_regs *regs)
+{
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+ #define STACK_MARGIN (256*6)
+
+ /* Our stack starts directly behind the thread_info struct. */
+ unsigned long stack_start = (unsigned long) current_thread_info();
+ unsigned long sp = regs->gr[30];
+ unsigned long stack_usage;
+ unsigned int *last_usage;
+ int cpu = smp_processor_id();
+
+ /* if sr7 != 0, we interrupted a userspace process which we do not want
+ * to check for stack overflow. We will only check the kernel stack. */
+ if (regs->sr[7])
+ return;
+
+ /* calculate kernel stack usage */
+ stack_usage = sp - stack_start;
+#ifdef CONFIG_IRQSTACKS
+ if (likely(stack_usage <= THREAD_SIZE))
+ goto check_kernel_stack; /* found kernel stack */
+
+ /* check irq stack usage */
+ stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
+ stack_usage = sp - stack_start;
+
+ last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
+ if (unlikely(stack_usage > *last_usage))
+ *last_usage = stack_usage;
+
+ if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
+ return;
+
+ pr_emerg("stackcheck: %s will most likely overflow irq stack "
+ "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+ current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
+ goto panic_check;
+
+check_kernel_stack:
+#endif
+
+ /* check kernel stack usage */
+ last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
+
+ if (unlikely(stack_usage > *last_usage))
+ *last_usage = stack_usage;
+
+ if (likely(stack_usage < (THREAD_SIZE - STACK_MARGIN)))
+ return;
+
+ pr_emerg("stackcheck: %s will most likely overflow kernel stack "
+ "(sp:%lx, stk bottom-top:%lx-%lx)\n",
+ current->comm, sp, stack_start, stack_start + THREAD_SIZE);
+
+#ifdef CONFIG_IRQSTACKS
+panic_check:
+#endif
+ if (sysctl_panic_on_stackoverflow)
+ panic("low stack detected by irq handler - check messages\n");
+#endif
+}
+
+#ifdef CONFIG_IRQSTACKS
+/* in entry.S: */
+void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
+
+static void execute_on_irq_stack(void *func, unsigned long param1)
+{
+ union irq_stack_union *union_ptr;
+ unsigned long irq_stack;
+ volatile unsigned int *irq_stack_in_use;
+
+ union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
+ irq_stack = (unsigned long) &union_ptr->stack;
+ irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
+ 64); /* align for stack frame usage */
+
+ /* We may be called recursively. If we are already using the irq stack,
+ * just continue to use it. Use spinlocks to serialize
+ * the irq stack usage.
+ */
+ irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
+ if (!__ldcw(irq_stack_in_use)) {
+ void (*direct_call)(unsigned long p1) = func;
+
+ /* We are using the IRQ stack already.
+ * Do direct call on current stack. */
+ direct_call(param1);
+ return;
+ }
+
+ /* This is where we switch to the IRQ stack. */
+ call_on_stack(param1, func, irq_stack);
+
+ /* free up irq stack usage. */
+ *irq_stack_in_use = 1;
+}
+
+asmlinkage void do_softirq(void)
+{
+ __u32 pending;
+ unsigned long flags;
+
+ if (in_interrupt())
+ return;
+
+ local_irq_save(flags);
+
+ pending = local_softirq_pending();
+
+ if (pending)
+ execute_on_irq_stack(__do_softirq, 0);
+
+ local_irq_restore(flags);
+}
+#endif /* CONFIG_IRQSTACKS */
+
/* ONLY called from entry.S:intr_extint() */
void do_cpu_irq_mask(struct pt_regs *regs)
{
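
execute_on_irq_stack() in the hunk above uses a parisc ldcw word as a per-CPU ownership flag for the IRQ stack: ldcw atomically loads the word and stores 0, so 1 means "stack free" and 0 means "stack in use" (the slock[4] array and __ldcw_align() exist because ldcw needs a 16-byte-aligned word). A minimal sketch of the same pattern, written with a generic C11 atomic exchange, a single global flag instead of one per CPU, and hypothetical names:

#include <stdatomic.h>

static atomic_uint irq_stack_free = 1;	/* 1 = free, 0 = in use, as with ldcw */

static void handle_on_irq_stack(void (*func)(unsigned long), unsigned long arg)
{
	if (atomic_exchange(&irq_stack_free, 0) == 0) {
		/* Re-entered while the IRQ stack is already in use:
		 * run the handler on the current stack instead. */
		func(arg);
		return;
	}
	/* We own the IRQ stack; the real code switches the stack
	 * pointer here via call_on_stack() before calling func(arg). */
	func(arg);
	atomic_store(&irq_stack_free, 1);	/* release ownership */
}

The plain store of 1 at the end mirrors the write to *irq_stack_in_use in the diff; no compare-and-swap is needed to release the flag because only the current owner ever writes a non-zero value.
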
@@ -364,7 +552,13 @@ void do_cpu_irq_mask(struct pt_regs *regs)
goto set_out;
}
#endif
+ stack_overflow_check(regs);
+
+#ifdef CONFIG_IRQSTACKS
+ execute_on_irq_stack(&generic_handle_irq, irq);
+#else
generic_handle_irq(irq);
+#endif /* CONFIG_IRQSTACKS */
out:
irq_exit();
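
For scale, the constants behind the stack_overflow_check() call added above work out as follows (illustrative arithmetic only; THREAD_SIZE is the arch's usual kernel stack size):

/* IRQ_STACK_SIZE  = 4096 << 2 = 16384 bytes (16 KiB)
 * STACK_MARGIN    = 256 * 6   =  1536 bytes of headroom
 * The "will most likely overflow" message (and the optional panic,
 * since sysctl_panic_on_stackoverflow defaults to 1 here) fires once
 * usage exceeds IRQ_STACK_SIZE - STACK_MARGIN = 14848 bytes on the
 * IRQ stack, or THREAD_SIZE - STACK_MARGIN on the kernel stack.
 */
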
@@ -420,6 +614,4 @@ void __init init_IRQ(void)
cpu_eiem = EIEM_MASK(TIMER_IRQ);
#endif
set_eiem(cpu_eiem); /* EIEM : enable all external intr */
-
}
-