From c305d524e5dd3c3c7a6035083e30950bea1b52dc Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 2 Feb 2011 17:10:48 +0100
Subject: softirq: Avoid stack switch from ksoftirqd

ksoftirqd() calls do_softirq() which switches stacks on several
architectures. That makes no sense at all. ksoftirqd's stack is
sufficient. Call __do_softirq() directly.

Signed-off-by: Thomas Gleixner
Acked-by: Peter Zijlstra
Cc: Benjamin Herrenschmidt
Cc: Heiko Carstens
Acked-by: David Miller
Cc: Paul Mundt
Reviewed-by: Frank Rowand
LKML-Reference:
---
 kernel/softirq.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'kernel/softirq.c')

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 68eb5efec388..c0490464e92f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -738,7 +738,10 @@ static int run_ksoftirqd(void * __bind_cpu)
 			   don't process */
 			if (cpu_is_offline((long)__bind_cpu))
 				goto wait_to_die;
-			do_softirq();
+			local_irq_disable();
+			if (local_softirq_pending())
+				__do_softirq();
+			local_irq_enable();
 			preempt_enable_no_resched();
 			cond_resched();
 			preempt_disable();
--
cgit v1.2.3
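As background for the change above: on architectures that provide a separate
per-CPU softirq stack, do_softirq() first switches to that stack before running
the pending handlers, roughly as in the sketch below. This is a hedged,
simplified illustration rather than any particular architecture's code, and
call_on_softirq_stack() is a made-up placeholder for the arch-specific
stack-switching helper. Since ksoftirqd already runs on its own kernel thread
stack, that detour buys nothing, which is why run_ksoftirqd() now checks the
pending mask with interrupts disabled and calls __do_softirq() directly.

    #include <linux/interrupt.h>
    #include <linux/hardirq.h>

    /*
     * Simplified sketch of an arch-level do_softirq(). The helper
     * call_on_softirq_stack() is a placeholder name, not a real kernel
     * symbol; real implementations use their own stack-switching code.
     */
    asmlinkage void do_softirq(void)
    {
            unsigned long flags;

            if (in_interrupt())
                    return;

            local_irq_save(flags);
            if (local_softirq_pending())
                    /* run the handlers on the separate softirq stack */
                    call_on_softirq_stack(__do_softirq);
            local_irq_restore(flags);
    }
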
From 8d32a307e4faa8b123dc8a9cd56d1a7525f69ad3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Wed, 23 Feb 2011 23:52:23 +0000
Subject: genirq: Provide forced interrupt threading

Add a command line parameter "threadirqs" which forces all interrupts
except those marked IRQF_NO_THREAD to run threaded. That's mostly a
debug option to allow retrieving better debug data from crashing
interrupt handlers. If "threadirqs" is not enabled on the kernel
command line, then there is no impact in the interrupt hotpath.

Architecture code needs to select CONFIG_IRQ_FORCED_THREADING after
marking the interrupts which can't be threaded IRQF_NO_THREAD. All
interrupts which have IRQF_TIMER set are implicitly marked
IRQF_NO_THREAD. Also all PER_CPU interrupts are excluded.

Forced threading of hard interrupts also forces all soft interrupt
handling into thread context.

When enabled it might slow things down a bit, but for debugging
problems in interrupt code it's a reasonable penalty as it does not
immediately crash and burn the machine when an interrupt handler is
buggy.

Some test results on a Core2Duo machine:

Cache cold run of:
 # time git grep irq_desc

       non-threaded   threaded
 real  1m18.741s      1m19.061s
 user  0m1.874s       0m1.757s
 sys   0m5.843s       0m5.427s

 # iperf -c server
non-threaded
[  3]  0.0-10.0 sec  1.09 GBytes   933 Mbits/sec
[  3]  0.0-10.0 sec  1.09 GBytes   934 Mbits/sec
[  3]  0.0-10.0 sec  1.09 GBytes   933 Mbits/sec
threaded
[  3]  0.0-10.0 sec  1.09 GBytes   939 Mbits/sec
[  3]  0.0-10.0 sec  1.09 GBytes   934 Mbits/sec
[  3]  0.0-10.0 sec  1.09 GBytes   937 Mbits/sec

Signed-off-by: Thomas Gleixner
Cc: Peter Zijlstra
LKML-Reference: <20110223234956.772668648@linutronix.de>
---
 Documentation/kernel-parameters.txt |  4 +++
 include/linux/interrupt.h           |  7 ++++
 kernel/irq/Kconfig                  |  3 ++
 kernel/irq/internals.h              |  2 ++
 kernel/irq/manage.c                 | 67 ++++++++++++++++++++++++++++++++++---
 kernel/softirq.c                    | 16 +++++++--
 6 files changed, 93 insertions(+), 6 deletions(-)

(limited to 'kernel/softirq.c')

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 89835a4766a6..cac6cf9a588c 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2436,6 +2436,10 @@ and is between 256 and 4096 characters. It is defined in the file
 			<deci-seconds>: poll all this frequency
 			0: no polling (default)
 
+	threadirqs	[KNL]
+			Force threading of all interrupt handlers except those
+			marked explicitely IRQF_NO_THREAD.
+
 	topology=	[S390]
 			Format: {off | on}
 			Specify if the kernel should make use of the cpu
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0fc3eb9397b4..f8a8af108e0c 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -383,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
 }
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
+
+#ifdef CONFIG_IRQ_FORCED_THREADING
+extern bool force_irqthreads;
+#else
+#define force_irqthreads	(0)
+#endif
+
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 355b8c7957f5..144db9dcfcde 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -38,6 +38,9 @@ config HARDIRQS_SW_RESEND
 config IRQ_PREFLOW_FASTEOI
 	bool
 
+config IRQ_FORCED_THREADING
+	bool
+
 config SPARSE_IRQ
 	bool "Support sparse irq numbering"
 	depends on HAVE_SPARSE_IRQ
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 935bec4bfa87..6c6ec9a49027 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -27,12 +27,14 @@ extern int noirqdebug;
  * IRQTF_DIED      - handler thread died
  * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
  * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
+ * IRQTF_FORCED_THREAD  - irq action is force threaded
  */
 enum {
 	IRQTF_RUNTHREAD,
 	IRQTF_DIED,
 	IRQTF_WARNED,
 	IRQTF_AFFINITY,
+	IRQTF_FORCED_THREAD,
 };
 
 /*
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 58c861367300..acd599a43bfb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,17 @@
 
 #include "internals.h"
 
+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
+
+static int __init setup_forced_irqthreads(char *arg)
+{
+	force_irqthreads = true;
+	return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
+
 /**
  * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  * @irq: interrupt number to wait for
@@ -701,6 +712,32 @@ static inline void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 #endif
 
+/*
+ * Interrupts which are not explicitely requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static void
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+	local_bh_disable();
+	action->thread_fn(action->irq, action->dev_id);
+	irq_finalize_oneshot(desc, action, false);
+	local_bh_enable();
+}
+
+/*
+ * Interrupts explicitely requested as threaded interupts want to be
+ * preemtible - many of them need to sleep and wait for slow busses to
+ * complete.
+ */
+static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+	action->thread_fn(action->irq, action->dev_id);
+	irq_finalize_oneshot(desc, action, false);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -711,8 +748,15 @@ static int irq_thread(void *data)
 	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
+	void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
 	int wake;
 
+	if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
+					&action->thread_flags))
+		handler_fn = irq_forced_thread_fn;
+	else
+		handler_fn = irq_thread_fn;
+
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->irqaction = action;
 
@@ -736,10 +780,7 @@ static int irq_thread(void *data)
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
 			raw_spin_unlock_irq(&desc->lock);
-
-			action->thread_fn(action->irq, action->dev_id);
-
-			irq_finalize_oneshot(desc, action, false);
+			handler_fn(desc, action);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
@@ -789,6 +830,22 @@ void exit_irq_thread(void)
 	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
 }
 
+static void irq_setup_forced_threading(struct irqaction *new)
+{
+	if (!force_irqthreads)
+		return;
+	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+		return;
+
+	new->flags |= IRQF_ONESHOT;
+
+	if (!new->thread_fn) {
+		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+		new->thread_fn = new->handler;
+		new->handler = irq_default_primary_handler;
+	}
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
@@ -838,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * dummy function which warns when called.
 		 */
 		new->handler = irq_nested_primary_handler;
+	} else {
+		irq_setup_forced_threading(new);
 	}
 
 	/*
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c0490464e92f..a33fb2911248 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -311,9 +311,21 @@ void irq_enter(void)
 }
 
 #ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
-# define invoke_softirq()	__do_softirq()
+static inline void invoke_softirq(void)
+{
+	if (!force_irqthreads)
+		__do_softirq();
+	else
+		wakeup_softirqd();
+}
 #else
-# define invoke_softirq()	do_softirq()
+static inline void invoke_softirq(void)
+{
+	if (!force_irqthreads)
+		do_softirq();
+	else
+		wakeup_softirqd();
+}
 #endif
 
 /*
--
cgit v1.2.3
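
To make the driver-visible effect of forced threading concrete, here is a
hedged sketch of a hypothetical driver; the foo_* names are invented for
illustration and are not part of the patch. With CONFIG_IRQ_FORCED_THREADING
selected and "threadirqs" on the command line, irq_setup_forced_threading()
moves the handler below into an interrupt thread: the original handler becomes
the thread function, irq_default_primary_handler() is installed as the
hard-irq handler, and IRQF_ONESHOT is set. Interrupts requested with
IRQF_NO_THREAD or IRQF_TIMER, and per-CPU interrupts, stay in hard interrupt
context.

    #include <linux/interrupt.h>

    /* Made-up device structure, only for this sketch. */
    struct foo_device {
            int irq;
    };

    /* Hypothetical handler, registered without IRQF_NO_THREAD. */
    static irqreturn_t foo_interrupt(int irq, void *dev_id)
    {
            /*
             * Booted with "threadirqs": this runs from the irq thread via
             * irq_forced_thread_fn(), with bottom halves disabled around it,
             * while irq_default_primary_handler() merely wakes the thread
             * from hard interrupt context. Booted normally: it runs in hard
             * interrupt context exactly as before.
             */
            return IRQ_HANDLED;
    }

    static int foo_setup_irq(struct foo_device *foo)
    {
            /*
             * Force-threaded when "threadirqs" is set. Passing IRQF_NO_THREAD
             * here (or requesting an IRQF_TIMER or per-CPU interrupt) would
             * keep the handler in hard interrupt context instead.
             */
            return request_irq(foo->irq, foo_interrupt, 0, "foo", foo);
    }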