Diffstat (limited to 'arch/blackfin/mach-common/ints-priority.c')
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c  271
1 file changed, 160 insertions, 111 deletions
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index a604f19d8dc3..43d9fb195c1e 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -15,6 +15,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/sched.h>
 #ifdef CONFIG_IPIPE
 #include <linux/ipipe.h>
 #endif
@@ -124,21 +125,21 @@ static void __init search_IAR(void)
  * This is for core internal IRQs
  */
 
-static void bfin_ack_noop(unsigned int irq)
+static void bfin_ack_noop(struct irq_data *d)
 {
 	/* Dummy function. */
 }
 
-static void bfin_core_mask_irq(unsigned int irq)
+static void bfin_core_mask_irq(struct irq_data *d)
 {
-	bfin_irq_flags &= ~(1 << irq);
+	bfin_irq_flags &= ~(1 << d->irq);
 	if (!hard_irqs_disabled())
 		hard_local_irq_enable();
 }
 
-static void bfin_core_unmask_irq(unsigned int irq)
+static void bfin_core_unmask_irq(struct irq_data *d)
 {
-	bfin_irq_flags |= 1 << irq;
+	bfin_irq_flags |= 1 << d->irq;
 	/*
 	 * If interrupts are enabled, IMASK must contain the same value
 	 * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
@@ -176,6 +177,11 @@ static void bfin_internal_mask_irq(unsigned int irq)
 	hard_local_irq_restore(flags);
 }
 
+static void bfin_internal_mask_irq_chip(struct irq_data *d)
+{
+	bfin_internal_mask_irq(d->irq);
+}
+
 #ifdef CONFIG_SMP
 static void bfin_internal_unmask_irq_affinity(unsigned int irq,
 		const struct cpumask *affinity)
@@ -211,19 +217,24 @@ static void bfin_internal_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_SMP
-static void bfin_internal_unmask_irq(unsigned int irq)
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	bfin_internal_unmask_irq_affinity(irq, desc->affinity);
+	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
 }
 
-static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int bfin_internal_set_affinity(struct irq_data *d,
+				      const struct cpumask *mask, bool force)
 {
-	bfin_internal_mask_irq(irq);
-	bfin_internal_unmask_irq_affinity(irq, mask);
+	bfin_internal_mask_irq(d->irq);
+	bfin_internal_unmask_irq_affinity(d->irq, mask);
 
 	return 0;
 }
+#else
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
+{
+	bfin_internal_unmask_irq(d->irq);
+}
 #endif
 
 #ifdef CONFIG_PM
@@ -279,28 +290,33 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 
 	return 0;
 }
+
+static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
+{
+	return bfin_internal_set_wake(d->irq, state);
+}
 #endif
 
 static struct irq_chip bfin_core_irqchip = {
 	.name = "CORE",
-	.ack = bfin_ack_noop,
-	.mask = bfin_core_mask_irq,
-	.unmask = bfin_core_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask = bfin_core_mask_irq,
+	.irq_unmask = bfin_core_unmask_irq,
 };
 
 static struct irq_chip bfin_internal_irqchip = {
 	.name = "INTN",
-	.ack = bfin_ack_noop,
-	.mask = bfin_internal_mask_irq,
-	.unmask = bfin_internal_unmask_irq,
-	.mask_ack = bfin_internal_mask_irq,
-	.disable = bfin_internal_mask_irq,
-	.enable = bfin_internal_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask = bfin_internal_mask_irq_chip,
+	.irq_unmask = bfin_internal_unmask_irq_chip,
+	.irq_mask_ack = bfin_internal_mask_irq_chip,
+	.irq_disable = bfin_internal_mask_irq_chip,
+	.irq_enable = bfin_internal_unmask_irq_chip,
 #ifdef CONFIG_SMP
-	.set_affinity = bfin_internal_set_affinity,
+	.irq_set_affinity = bfin_internal_set_affinity,
 #endif
 #ifdef CONFIG_PM
-	.set_wake = bfin_internal_set_wake,
+	.irq_set_wake = bfin_internal_set_wake_chip,
 #endif
 };
 
@@ -312,33 +328,32 @@ static void bfin_handle_irq(unsigned irq)
 	__ipipe_handle_irq(irq, &regs);
 	ipipe_trace_irq_exit(irq);
 #else /* !CONFIG_IPIPE */
-	struct irq_desc *desc = irq_desc + irq;
-	desc->handle_irq(irq, desc);
+	generic_handle_irq(irq);
 #endif  /* !CONFIG_IPIPE */
 }
 
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 static int error_int_mask;
 
-static void bfin_generic_error_mask_irq(unsigned int irq)
+static void bfin_generic_error_mask_irq(struct irq_data *d)
 {
-	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
+	error_int_mask &= ~(1L << (d->irq - IRQ_PPI_ERROR));
 	if (!error_int_mask)
 		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
 }
 
-static void bfin_generic_error_unmask_irq(unsigned int irq)
+static void bfin_generic_error_unmask_irq(struct irq_data *d)
 {
 	bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
-	error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
+	error_int_mask |= 1L << (d->irq - IRQ_PPI_ERROR);
 }
 
 static struct irq_chip bfin_generic_error_irqchip = {
 	.name = "ERROR",
-	.ack = bfin_ack_noop,
-	.mask_ack = bfin_generic_error_mask_irq,
-	.mask = bfin_generic_error_mask_irq,
-	.unmask = bfin_generic_error_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask_ack = bfin_generic_error_mask_irq,
+	.irq_mask = bfin_generic_error_mask_irq,
+	.irq_unmask = bfin_generic_error_unmask_irq,
 };
 
 static void bfin_demux_error_irq(unsigned int int_err_irq,
@@ -448,8 +463,10 @@ static void bfin_mac_status_ack_irq(unsigned int irq)
 	}
 }
 
-static void bfin_mac_status_mask_irq(unsigned int irq)
+static void bfin_mac_status_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 	switch (irq) {
@@ -466,8 +483,10 @@ static void bfin_mac_status_mask_irq(unsigned int irq)
 	bfin_mac_status_ack_irq(irq);
 }
 
-static void bfin_mac_status_unmask_irq(unsigned int irq)
+static void bfin_mac_status_unmask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 	switch (irq) {
 	case IRQ_MAC_PHYINT:
@@ -484,7 +503,7 @@ static void bfin_mac_status_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_PM
-int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
+int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
 {
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
@@ -496,12 +515,12 @@ int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
 
 static struct irq_chip bfin_mac_status_irqchip = {
 	.name = "MACST",
-	.ack = bfin_ack_noop,
-	.mask_ack = bfin_mac_status_mask_irq,
-	.mask = bfin_mac_status_mask_irq,
-	.unmask = bfin_mac_status_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask_ack = bfin_mac_status_mask_irq,
+	.irq_mask = bfin_mac_status_mask_irq,
+	.irq_unmask = bfin_mac_status_unmask_irq,
 #ifdef CONFIG_PM
-	.set_wake = bfin_mac_status_set_wake,
+	.irq_set_wake = bfin_mac_status_set_wake,
 #endif
 };
 
@@ -538,13 +557,9 @@ static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-	_set_irq_handler(irq, handle_level_irq);
-#else
-	struct irq_desc *desc = irq_desc + irq;
-	/* May not call generic set_irq_handler() due to spinlock
-	   recursion. */
-	desc->handle_irq = handle;
+	handle = handle_level_irq;
 #endif
+	__irq_set_handler_locked(irq, handle);
 }
 
 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -552,58 +567,59 @@ extern void bfin_gpio_irq_prepare(unsigned gpio);
 
 #if !defined(CONFIG_BF54x)
 
-static void bfin_gpio_ack_irq(unsigned int irq)
+static void bfin_gpio_ack_irq(struct irq_data *d)
 {
 	/* AFAIK ack_irq in case mask_ack is provided
 	 * get's only called for edge sense irqs
 	 */
-	set_gpio_data(irq_to_gpio(irq), 0);
+	set_gpio_data(irq_to_gpio(d->irq), 0);
 }
 
-static void bfin_gpio_mask_ack_irq(unsigned int irq)
+static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	unsigned int irq = d->irq;
 	u32 gpionr = irq_to_gpio(irq);
 
-	if (desc->handle_irq == handle_edge_irq)
+	if (!irqd_is_level_type(d))
 		set_gpio_data(gpionr, 0);
 
 	set_gpio_maska(gpionr, 0);
 }
 
-static void bfin_gpio_mask_irq(unsigned int irq)
+static void bfin_gpio_mask_irq(struct irq_data *d)
 {
-	set_gpio_maska(irq_to_gpio(irq), 0);
+	set_gpio_maska(irq_to_gpio(d->irq), 0);
 }
 
-static void bfin_gpio_unmask_irq(unsigned int irq)
+static void bfin_gpio_unmask_irq(struct irq_data *d)
 {
-	set_gpio_maska(irq_to_gpio(irq), 1);
+	set_gpio_maska(irq_to_gpio(d->irq), 1);
 }
 
-static unsigned int bfin_gpio_irq_startup(unsigned int irq)
+static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
 	if (__test_and_set_bit(gpionr, gpio_enabled))
 		bfin_gpio_irq_prepare(gpionr);
 
-	bfin_gpio_unmask_irq(irq);
+	bfin_gpio_unmask_irq(d);
 
 	return 0;
 }
 
-static void bfin_gpio_irq_shutdown(unsigned int irq)
+static void bfin_gpio_irq_shutdown(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
-	bfin_gpio_mask_irq(irq);
+	bfin_gpio_mask_irq(d);
 	__clear_bit(gpionr, gpio_enabled);
 	bfin_gpio_irq_free(gpionr);
 }
 
-static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
+static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
+	unsigned int irq = d->irq;
 	int ret;
 	char buf[16];
 	u32 gpionr = irq_to_gpio(irq);
@@ -664,9 +680,9 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
 }
 
 #ifdef CONFIG_PM
-int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
+int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 {
-	return gpio_pm_wakeup_ctrl(irq_to_gpio(irq), state);
+	return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
 }
 #endif
 
@@ -818,14 +834,13 @@ void init_pint_lut(void)
 	}
 }
 
-static void bfin_gpio_ack_irq(unsigned int irq)
+static void bfin_gpio_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
-	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
 		if (pint[bank]->invert_set & pintbit)
 			pint[bank]->invert_clear = pintbit;
 		else
@@ -835,14 +850,13 @@
 
 }
 
-static void bfin_gpio_mask_ack_irq(unsigned int irq)
+static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
-	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
 		if (pint[bank]->invert_set & pintbit)
 			pint[bank]->invert_clear = pintbit;
 		else
@@ -853,24 +867,25 @@
 	pint[bank]->mask_clear = pintbit;
 }
 
-static void bfin_gpio_mask_irq(unsigned int irq)
+static void bfin_gpio_mask_irq(struct irq_data *d)
 {
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 
 	pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
 }
 
-static void bfin_gpio_unmask_irq(unsigned int irq)
+static void bfin_gpio_unmask_irq(struct irq_data *d)
 {
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
 	pint[bank]->mask_set = pintbit;
 }
 
-static unsigned int bfin_gpio_irq_startup(unsigned int irq)
+static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	u32 gpionr = irq_to_gpio(irq);
 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 
@@ -884,22 +899,23 @@ static unsigned int bfin_gpio_irq_startup(unsigned int irq)
 	if (__test_and_set_bit(gpionr, gpio_enabled))
 		bfin_gpio_irq_prepare(gpionr);
 
-	bfin_gpio_unmask_irq(irq);
+	bfin_gpio_unmask_irq(d);
 
 	return 0;
 }
 
-static void bfin_gpio_irq_shutdown(unsigned int irq)
+static void bfin_gpio_irq_shutdown(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
-	bfin_gpio_mask_irq(irq);
+	bfin_gpio_mask_irq(d);
 	__clear_bit(gpionr, gpio_enabled);
 	bfin_gpio_irq_free(gpionr);
 }
 
-static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
+static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
+	unsigned int irq = d->irq;
 	int ret;
 	char buf[16];
 	u32 gpionr = irq_to_gpio(irq);
@@ -961,10 +977,10 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
 u32 pint_saved_masks[NR_PINT_SYS_IRQS];
 u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
 
-int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
+int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 {
 	u32 pint_irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 bank = PINT_2_BANK(pint_val);
 	u32 pintbit = PINT_BIT(pint_val);
 
@@ -1066,17 +1082,17 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
 
 static struct irq_chip bfin_gpio_irqchip = {
 	.name = "GPIO",
-	.ack = bfin_gpio_ack_irq,
-	.mask = bfin_gpio_mask_irq,
-	.mask_ack = bfin_gpio_mask_ack_irq,
-	.unmask = bfin_gpio_unmask_irq,
-	.disable = bfin_gpio_mask_irq,
-	.enable = bfin_gpio_unmask_irq,
-	.set_type = bfin_gpio_irq_type,
-	.startup = bfin_gpio_irq_startup,
-	.shutdown = bfin_gpio_irq_shutdown,
+	.irq_ack = bfin_gpio_ack_irq,
+	.irq_mask = bfin_gpio_mask_irq,
+	.irq_mask_ack = bfin_gpio_mask_ack_irq,
+	.irq_unmask = bfin_gpio_unmask_irq,
+	.irq_disable = bfin_gpio_mask_irq,
+	.irq_enable = bfin_gpio_unmask_irq,
+	.irq_set_type = bfin_gpio_irq_type,
+	.irq_startup = bfin_gpio_irq_startup,
+	.irq_shutdown = bfin_gpio_irq_shutdown,
 #ifdef CONFIG_PM
-	.set_wake = bfin_gpio_set_wake,
+	.irq_set_wake = bfin_gpio_set_wake,
 #endif
 };
 
@@ -1147,9 +1163,9 @@ int __init init_arch_irq(void)
 
	for (irq = 0; irq <= SYS_IRQS; irq++) {
 		if (irq <= IRQ_CORETMR)
-			set_irq_chip(irq, &bfin_core_irqchip);
+			irq_set_chip(irq, &bfin_core_irqchip);
 		else
-			set_irq_chip(irq, &bfin_internal_irqchip);
+			irq_set_chip(irq, &bfin_internal_irqchip);
 
 		switch (irq) {
 #if defined(CONFIG_BF53x)
@@ -1173,50 +1189,50 @@ int __init init_arch_irq(void)
 #elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
 		case IRQ_PORTF_INTA:
 #endif
-			set_irq_chained_handler(irq,
-						bfin_demux_gpio_irq);
+			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
 			break;
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 		case IRQ_GENERIC_ERROR:
-			set_irq_chained_handler(irq, bfin_demux_error_irq);
+			irq_set_chained_handler(irq, bfin_demux_error_irq);
 			break;
 #endif
 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 		case IRQ_MAC_ERROR:
-			set_irq_chained_handler(irq, bfin_demux_mac_status_irq);
+			irq_set_chained_handler(irq,
+						bfin_demux_mac_status_irq);
 			break;
 #endif
 #ifdef CONFIG_SMP
 		case IRQ_SUPPLE_0:
 		case IRQ_SUPPLE_1:
-			set_irq_handler(irq, handle_percpu_irq);
+			irq_set_handler(irq, handle_percpu_irq);
 			break;
 #endif
 
 #ifdef CONFIG_TICKSOURCE_CORETMR
 		case IRQ_CORETMR:
 # ifdef CONFIG_SMP
-			set_irq_handler(irq, handle_percpu_irq);
+			irq_set_handler(irq, handle_percpu_irq);
 			break;
 # else
-			set_irq_handler(irq, handle_simple_irq);
+			irq_set_handler(irq, handle_simple_irq);
 			break;
 # endif
 #endif
 
 #ifdef CONFIG_TICKSOURCE_GPTMR0
 		case IRQ_TIMER0:
-			set_irq_handler(irq, handle_simple_irq);
+			irq_set_handler(irq, handle_simple_irq);
 			break;
 #endif
 
 #ifdef CONFIG_IPIPE
 		default:
-			set_irq_handler(irq, handle_level_irq);
+			irq_set_handler(irq, handle_level_irq);
 			break;
 #else /* !CONFIG_IPIPE */
 		default:
-			set_irq_handler(irq, handle_simple_irq);
+			irq_set_handler(irq, handle_simple_irq);
 			break;
 #endif /* !CONFIG_IPIPE */
 		}
@@ -1224,22 +1240,22 @@ int __init init_arch_irq(void)
 
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 	for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
-		set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
+		irq_set_chip_and_handler(irq, &bfin_generic_error_irqchip,
 					 handle_level_irq);
 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
-	set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
+	irq_set_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
 #endif
 #endif
 
 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 	for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
-		set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip,
+		irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
 					 handle_level_irq);
 #endif
 	/* if configured as edge, then will be changed to do_edge_IRQ */
 	for (irq = GPIO_IRQ_BASE;
 		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
-		set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
+		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
 					 handle_level_irq);
 
 	bfin_write_IMASK(0);
@@ -1373,7 +1389,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 	struct ipipe_domain *this_domain = __ipipe_current_domain;
 	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
 	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
-	int irq, s;
+	int irq, s = 0;
 
 	if (likely(vec == EVT_IVTMR_P))
 		irq = IRQ_CORETMR;
@@ -1423,6 +1439,21 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
 	}
+	/*
+	 * We don't want Linux interrupt handlers to run at the
+	 * current core priority level (i.e. < EVT15), since this
+	 * might delay other interrupts handled by a high priority
+	 * domain. Here is what we do instead:
+	 *
+	 * - we raise the SYNCDEFER bit to prevent
+	 * __ipipe_handle_irq() to sync the pipeline for the root
+	 * stage for the incoming interrupt. Upon return, that IRQ is
+	 * pending in the interrupt log.
+	 *
+	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
+	 * that _schedule_and_signal_from_int will eventually sync the
+	 * pipeline from EVT15.
+	 */
 	if (this_domain == ipipe_root_domain) {
 		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
 		barrier();
 	}
@@ -1432,6 +1463,24 @@
 	__ipipe_handle_irq(irq, regs);
 	ipipe_trace_irq_exit(irq);
 
+	if (user_mode(regs) &&
+	    !ipipe_test_foreign_stack() &&
+	    (current->ipipe_flags & PF_EVTRET) != 0) {
+		/*
+		 * Testing for user_regs() does NOT fully eliminate
+		 * foreign stack contexts, because of the forged
+		 * interrupt returns we do through
+		 * __ipipe_call_irqtail. In that case, we might have
+		 * preempted a foreign stack context in a high
+		 * priority domain, with a single interrupt level now
+		 * pending after the irqtail unwinding is done. In
+		 * which case user_mode() is now true, and the event
+		 * gets dispatched spuriously.
+		 */
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+	}
+
 	if (this_domain == ipipe_root_domain) {
 		set_thread_flag(TIF_IRQ_SYNC);
 		if (!s) {
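
Note on the conversion pattern (not part of the patch): the change applied throughout this diff is mechanical. Each irq_chip callback that used to take an unsigned int irq now takes a struct irq_data pointer and reads the IRQ number from it, the struct irq_chip fields gain an irq_ prefix (.mask becomes .irq_mask, .set_wake becomes .irq_set_wake, and so on), and the genirq setup helpers are renamed to match (set_irq_chip() becomes irq_set_chip(), set_irq_chained_handler() becomes irq_set_chained_handler(), set_irq_chip_and_handler() becomes irq_set_chip_and_handler()). A minimal sketch of the new-style shape, using hypothetical mydev_* names rather than anything from this file:

#include <linux/irq.h>

/* Illustrative only -- not taken from the patch above. */
static void mydev_irq_mask(struct irq_data *d)	/* was: (unsigned int irq) */
{
	unsigned int irq = d->irq;	/* IRQ number now comes from irq_data */

	/* ... write the controller's mask register for 'irq' here ... */
}

static struct irq_chip mydev_irqchip = {
	.name     = "MYDEV",
	.irq_mask = mydev_irq_mask,	/* was: .mask = mydev_mask */
};

The same pattern covers ack, unmask, startup, shutdown, set_type, set_affinity and set_wake, which is why most hunks above are one-line signature and field renames; the few behavioural bits are the generic_handle_irq() and irqd_*() accessor switches and the added i-pipe comments.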