author		Thomas Gleixner <tglx@linutronix.de>	2014-02-19 17:23:52 +0100
committer	Thomas Gleixner <tglx@linutronix.de>	2014-02-19 17:23:52 +0100
commit		1bc38a1d33a09949cfb6a12349e3c8dac867a5ab (patch)
tree		e3fa41f522857e3416854368b59e2e08c571f55d /kernel/irq/manage.c
parent		Merge tag 'mvebu-irqchip-3.14' of git://git.infradead.org/linux-mvebu into ir... (diff)
parent		genirq: Provide irq_wake_thread() (diff)
Merge branch 'irq/for-arm' into irq/core
Pull in the functionality required to clean up sdhci/sdio. It's kept in
a separate branch so it can be pulled in by others.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--	kernel/irq/manage.c	97
1 file changed, 77 insertions(+), 20 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 481a13c43b17..54eb5c99351b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -32,24 +32,10 @@ static int __init setup_forced_irqthreads(char *arg)
 early_param("threadirqs", setup_forced_irqthreads);
 #endif
 
-/**
- * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
- * @irq: interrupt number to wait for
- *
- * This function waits for any pending IRQ handlers for this interrupt
- * to complete before returning. If you use this function while
- * holding a resource the IRQ handler may need you will deadlock.
- *
- * This function may be called - with care - from IRQ context.
- */
-void synchronize_irq(unsigned int irq)
+static void __synchronize_hardirq(struct irq_desc *desc)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
 	bool inprogress;
 
-	if (!desc)
-		return;
-
 	do {
 		unsigned long flags;
 
@@ -67,12 +53,56 @@ void synchronize_irq(unsigned int irq)
 
 		/* Oops, that failed? */
 	} while (inprogress);
+}
 
-	/*
-	 * We made sure that no hardirq handler is running. Now verify
-	 * that no threaded handlers are active.
-	 */
-	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
+/**
+ * synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
+ * @irq: interrupt number to wait for
+ *
+ * This function waits for any pending hard IRQ handlers for this
+ * interrupt to complete before returning. If you use this
+ * function while holding a resource the IRQ handler may need you
+ * will deadlock. It does not take associated threaded handlers
+ * into account.
+ *
+ * Do not use this for shutdown scenarios where you must be sure
+ * that all parts (hardirq and threaded handler) have completed.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void synchronize_hardirq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc)
+		__synchronize_hardirq(desc);
+}
+EXPORT_SYMBOL(synchronize_hardirq);
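
A minimal usage sketch of the new interface (not part of this patch; the
"foo" driver, its struct and the FOO_IRQ_ENABLE register are hypothetical):
synchronize_hardirq() is the barrier to use when only the primary handler
has to be quiesced, e.g. before reprogramming registers the hard handler
touches, while the threaded handler is allowed to keep running:

#include <linux/interrupt.h>
#include <linux/io.h>

/* Hypothetical device, for illustration only. */
struct foo_dev {
	void __iomem *base;
	void *buf;		/* buffer shared with the handlers */
	int irq;
};
#define FOO_IRQ_ENABLE	0x04

static void foo_quiesce_hardirq(struct foo_dev *foo)
{
	/* Stop the device from raising new interrupts. */
	writel(0, foo->base + FOO_IRQ_ENABLE);

	/*
	 * Wait for a primary handler that may still be running on
	 * another CPU. Unlike synchronize_irq() this does not wait
	 * for the threaded handler, so it must not be used on a
	 * teardown path that frees data the thread may still touch.
	 */
	synchronize_hardirq(foo->irq);
}
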
+
+/**
+ * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
+ * @irq: interrupt number to wait for
+ *
+ * This function waits for any pending IRQ handlers for this interrupt
+ * to complete before returning. If you use this function while
+ * holding a resource the IRQ handler may need you will deadlock.
+ *
+ * This function may be called - with care - from IRQ context.
+ */
+void synchronize_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (desc) {
+		__synchronize_hardirq(desc);
+		/*
+		 * We made sure that no hardirq handler is
+		 * running. Now verify that no threaded handlers are
+		 * active.
+		 */
+		wait_event(desc->wait_for_threads,
+			   !atomic_read(&desc->threads_active));
+	}
 }
 EXPORT_SYMBOL(synchronize_irq);
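
By contrast, a shutdown path has to use synchronize_irq(), which
additionally waits for threads_active to drop to zero. A sketch under
the same hypothetical foo driver assumptions as above:

static void foo_teardown(struct foo_dev *foo)
{
	/* Stop the device from raising new interrupts. */
	writel(0, foo->base + FOO_IRQ_ENABLE);

	/*
	 * Waits for the hard handler AND the threaded handler to
	 * complete; only after this returns is it safe to release
	 * resources that either of them may use.
	 */
	synchronize_irq(foo->irq);

	kfree(foo->buf);
}
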
@@ -881,6 +911,33 @@ static int irq_thread(void *data)
 	return 0;
 }
 
+/**
+ * irq_wake_thread - wake the irq thread for the action identified by dev_id
+ * @irq:	Interrupt line
+ * @dev_id:	Device identity for which the thread should be woken
+ *
+ */
+void irq_wake_thread(unsigned int irq, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irqaction *action;
+	unsigned long flags;
+
+	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		return;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	for (action = desc->action; action; action = action->next) {
+		if (action->dev_id == dev_id) {
+			if (action->thread)
+				__irq_wake_thread(desc, action);
+			break;
+		}
+	}
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+EXPORT_SYMBOL_GPL(irq_wake_thread);
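
The dev_id passed to irq_wake_thread() must be the same cookie that was
handed to request_threaded_irq(), so the lookup above finds the matching
irqaction. A sketch (same hypothetical foo driver) of waking the threaded
handler without a hardware interrupt having fired, e.g. from a polling
path, which is the kind of use the sdhci/sdio work pulled in here needs:

static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
	/* Ack/quiesce the device, then defer the real work. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	/* Heavy lifting runs here, in process context. */
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	/* foo is the dev_id cookie for both handlers. */
	return request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
				    0, "foo", foo);
}

static void foo_poll(struct foo_dev *foo)
{
	/* Run the threaded handler even though no interrupt fired. */
	irq_wake_thread(foo->irq, foo);
}
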
+
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)