author      Ralf Baechle <ralf@linux-mips.org>    2007-10-12 00:46:09 +0200
committer   Ralf Baechle <ralf@linux-mips.org>    2007-10-12 00:46:09 +0200
commit      91a2fcc88634663e9e13dcdfad0e4a860e64aeee (patch)
tree        a86b936f1057207d46e5a07ed826052ff640869a /arch
parent      [MIPS] Switch from to_tm to rtc_time_to_tm (diff)
download    linux-91a2fcc88634663e9e13dcdfad0e4a860e64aeee.tar.xz
            linux-91a2fcc88634663e9e13dcdfad0e4a860e64aeee.zip
[MIPS] Consolidate all variants of MIPS cp0 timer interrupt handlers.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch')
-rw-r--r--   arch/mips/au1000/common/irq.c          |   4
-rw-r--r--   arch/mips/au1000/common/time.c         |  40
-rw-r--r--   arch/mips/kernel/smtc.c                |   2
-rw-r--r--   arch/mips/kernel/time.c                |  93
-rw-r--r--   arch/mips/mips-boards/generic/time.c   | 112
-rw-r--r--   arch/mips/mipssim/sim_time.c           |  72
-rw-r--r--   arch/mips/sgi-ip22/ip22-int.c          |   5
-rw-r--r--   arch/mips/sgi-ip22/ip22-time.c         |  10
-rw-r--r--   arch/mips/sibyte/bcm1480/time.c        |  13
-rw-r--r--   arch/mips/sibyte/sb1250/time.c         |  13
10 files changed, 80 insertions, 284 deletions
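
Note (not part of the commit): the patch deletes the board-private copies of the cp0 compare tick handler (mips_timer_interrupt on Alchemy, sim_timer_interrupt on the simulator, indy_r4k_timer_interrupt on IP22, plus ll_local_timer_interrupt) and funnels every platform through the single ll_timer_interrupt() now declared in arch/mips/kernel/time.c with the (int irq, void *dev_id) signature shown below in the diff. The fragment that follows is an illustrative sketch of that call pattern only; example_plat_irq_dispatch and EXAMPLE_TIMER_IRQ are made-up names standing in for a platform's dispatcher and its timer interrupt number.

/*
 * Illustrative sketch only -- not taken from this commit.  A platform's
 * low-level dispatcher tests the cp0 cause/status bits for IP7 and hands
 * the tick to the common ll_timer_interrupt() in arch/mips/kernel/time.c
 * instead of keeping a private copy of the timer bookkeeping.
 * example_plat_irq_dispatch and EXAMPLE_TIMER_IRQ are hypothetical names.
 */
#include <linux/linkage.h>
#include <asm/mipsregs.h>

#define EXAMPLE_TIMER_IRQ      7       /* hypothetical platform timer IRQ number */

extern void ll_timer_interrupt(int irq, void *dev_id);

asmlinkage void example_plat_irq_dispatch(void)
{
        unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;

        if (pending & CAUSEF_IP7)
                /* cp0 compare tick: the consolidated handler does the work */
                ll_timer_interrupt(EXAMPLE_TIMER_IRQ, NULL);
        /* ...the remaining IP bits go to the platform's own cascade dispatch... */
}
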
diff --git a/arch/mips/au1000/common/irq.c b/arch/mips/au1000/common/irq.c
index db2ba0dbfd5a..47949d6f2c10 100644
--- a/arch/mips/au1000/common/irq.c
+++ b/arch/mips/au1000/common/irq.c
@@ -65,8 +65,6 @@
 #define EXT_INTC1_REQ1 5        /* IP 5 */
 #define MIPS_TIMER_IP  7        /* IP 7 */
 
-extern void mips_timer_interrupt(void);
-
 void (*board_init_irq)(void);
 
 static DEFINE_SPINLOCK(irq_lock);
@@ -635,7 +633,7 @@ asmlinkage void plat_irq_dispatch(void)
        unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
 
        if (pending & CAUSEF_IP7)
-               mips_timer_interrupt();
+               ll_timer_interrupt(63);
        else if (pending & CAUSEF_IP2)
                intc0_req0_irqdispatch();
        else if (pending & CAUSEF_IP3)
diff --git a/arch/mips/au1000/common/time.c b/arch/mips/au1000/common/time.c
index fb1fd50f19a6..726c340460b6 100644
--- a/arch/mips/au1000/common/time.c
+++ b/arch/mips/au1000/common/time.c
@@ -64,48 +64,8 @@ static unsigned long last_pc0, last_match20;
 
 static DEFINE_SPINLOCK(time_lock);
 
-static inline void ack_r4ktimer(unsigned long newval)
-{
-       write_c0_compare(newval);
-}
-
-/*
- * There are a lot of conceptually broken versions of the MIPS timer interrupt
- * handler floating around.  This one is rather different, but the algorithm
- * is provably more robust.
- */
 unsigned long wtimer;
 
-void mips_timer_interrupt(void)
-{
-       int irq = 63;
-
-       irq_enter();
-       kstat_this_cpu.irqs[irq]++;
-
-       if (r4k_offset == 0)
-               goto null;
-
-       do {
-               kstat_this_cpu.irqs[irq]++;
-               do_timer(1);
-#ifndef CONFIG_SMP
-               update_process_times(user_mode(get_irq_regs()));
-#endif
-               r4k_cur += r4k_offset;
-               ack_r4ktimer(r4k_cur);
-
-       } while (((unsigned long)read_c0_count()
-                 - r4k_cur) < 0x7fffffff);
-
-       irq_exit();
-       return;
-
-null:
-       ack_r4ktimer(0);
-       irq_exit();
-}
-
 #ifdef CONFIG_PM
 irqreturn_t counter0_irq(int irq, void *dev_id)
 {
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 137183bba54f..a7afbf2c9710 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -867,7 +867,7 @@ void ipi_decode(struct smtc_ipi *pipi)
 #ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
                        clock_hang_reported[dest_copy] = 0;
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-                       local_timer_interrupt(0, NULL);
+                       local_timer_interrupt(0);
                        irq_exit();
                        break;
                case LINUX_SMP_IPI:
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index c48ebd4b495e..d23e6825e988 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -144,7 +144,7 @@ void local_timer_interrupt(int irq, void *dev_id)
  * High-level timer interrupt service routines.  This function
  * is set as irqaction->handler and is invoked through do_IRQ.
  */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        write_seqlock(&xtime_lock);
 
@@ -174,9 +174,10 @@ int null_perf_irq(void)
        return 0;
 }
 
+EXPORT_SYMBOL(null_perf_irq);
+
 int (*perf_irq)(void) = null_perf_irq;
 
-EXPORT_SYMBOL(null_perf_irq);
 EXPORT_SYMBOL(perf_irq);
 
 /*
@@ -208,35 +209,79 @@ static inline int handle_perf_irq (int r2)
                !r2;
 }
 
-asmlinkage void ll_timer_interrupt(int irq)
+void ll_timer_interrupt(int irq, void *dev_id)
 {
-       int r2 = cpu_has_mips_r2;
+       int cpu = smp_processor_id();
 
-       irq_enter();
-       kstat_this_cpu.irqs[irq]++;
+#ifdef CONFIG_MIPS_MT_SMTC
+       /*
+        * In an SMTC system, one Count/Compare set exists per VPE.
+        * Which TC within a VPE gets the interrupt is essentially
+        * random - we only know that it shouldn't be one with
+        * IXMT set.  Whichever TC gets the interrupt needs to
+        * send special interprocessor interrupts to the other
+        * TCs to make sure that they schedule, etc.
+        *
+        * That code is specific to the SMTC kernel, not to
+        * the a particular platform, so it's invoked from
+        * the general MIPS timer_interrupt routine.
+        */
+
+       /*
+        * We could be here due to timer interrupt,
+        * perf counter overflow, or both.
+        */
+       (void) handle_perf_irq(1);
+
+       if (read_c0_cause() & (1 << 30)) {
+               /*
+                * There are things we only want to do once per tick
+                * in an "MP" system.  One TC of each VPE will take
+                * the actual timer interrupt.  The others will get
+                * timer broadcast IPIs.  We use whoever it is that takes
+                * the tick on VPE 0 to run the full timer_interrupt().
+                */
+               if (cpu_data[cpu].vpe_id == 0) {
+                       timer_interrupt(irq, NULL);
+               } else {
+                       write_c0_compare(read_c0_count() +
+                                        (mips_hpt_frequency/HZ));
+                       local_timer_interrupt(irq, dev_id);
+               }
+               smtc_timer_broadcast(cpu_data[cpu].vpe_id);
+       }
+#else /* CONFIG_MIPS_MT_SMTC */
+       int r2 = cpu_has_mips_r2;
 
        if (handle_perf_irq(r2))
-               goto out;
+               return;
 
        if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-               goto out;
-
-       timer_interrupt(irq, NULL);
-
-out:
-       irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
-       irq_enter();
-       if (smp_processor_id() != 0)
-               kstat_this_cpu.irqs[irq]++;
-
-       /* we keep interrupt disabled all the time */
-       local_timer_interrupt(irq, NULL);
+               return;
 
-       irq_exit();
+       if (cpu == 0) {
+               /*
+                * CPU 0 handles the global timer interrupt job and process
+                * accounting resets count/compare registers to trigger next
+                * timer int.
+                */
+               timer_interrupt(irq, NULL);
+       } else {
+               /* Everyone else needs to reset the timer int here as
+                  ll_local_timer_interrupt doesn't */
+               /*
+                * FIXME: need to cope with counter underflow.
+                * More support needs to be added to kernel/time for
+                * counter/timer interrupts on multiple CPU's
+                */
+               write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
+
+               /*
+                * Other CPUs should do profiling and process accounting
+                */
+               local_timer_interrupt(irq, dev_id);
+       }
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index 075f9d46f40e..345de881013c 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -68,108 +68,6 @@ static void mips_perf_dispatch(void)
 }
 
 /*
- * Redeclare until I get around mopping the timer code insanity on MIPS.
- */
-extern int null_perf_irq(void);
-
-extern int (*perf_irq)(void);
-
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq (int r2)
-{
-       /*
-        * The performance counter overflow interrupt may be shared with the
-        * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-        * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-        * and we can't reliably determine if a counter interrupt has also
-        * happened (!r2) then don't check for a timer interrupt.
-        */
-       return (cp0_perfcount_irq < 0) &&
-               perf_irq() == IRQ_HANDLED &&
-               !r2;
-}
-
-irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
-{
-       int cpu = smp_processor_id();
-
-#ifdef CONFIG_MIPS_MT_SMTC
-       /*
-        * In an SMTC system, one Count/Compare set exists per VPE.
-        * Which TC within a VPE gets the interrupt is essentially
-        * random - we only know that it shouldn't be one with
-        * IXMT set.  Whichever TC gets the interrupt needs to
-        * send special interprocessor interrupts to the other
-        * TCs to make sure that they schedule, etc.
-        *
-        * That code is specific to the SMTC kernel, not to
-        * the a particular platform, so it's invoked from
-        * the general MIPS timer_interrupt routine.
-        */
-
-       /*
-        * We could be here due to timer interrupt,
-        * perf counter overflow, or both.
-        */
-       (void) handle_perf_irq(1);
-
-       if (read_c0_cause() & (1 << 30)) {
-               /*
-                * There are things we only want to do once per tick
-                * in an "MP" system.  One TC of each VPE will take
-                * the actual timer interrupt.  The others will get
-                * timer broadcast IPIs.  We use whoever it is that takes
-                * the tick on VPE 0 to run the full timer_interrupt().
-                */
-               if (cpu_data[cpu].vpe_id == 0) {
-                       timer_interrupt(irq, NULL);
-               } else {
-                       write_c0_compare(read_c0_count() +
-                                        (mips_hpt_frequency/HZ));
-                       local_timer_interrupt(irq, dev_id);
-               }
-               smtc_timer_broadcast();
-       }
-#else /* CONFIG_MIPS_MT_SMTC */
-       int r2 = cpu_has_mips_r2;
-
-       if (handle_perf_irq(r2))
-               goto out;
-
-       if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-               goto out;
-
-       if (cpu == 0) {
-               /*
-                * CPU 0 handles the global timer interrupt job and process
-                * accounting resets count/compare registers to trigger next
-                * timer int.
-                */
-               timer_interrupt(irq, NULL);
-       } else {
-               /* Everyone else needs to reset the timer int here as
-                  ll_local_timer_interrupt doesn't */
-               /*
-                * FIXME: need to cope with counter underflow.
-                * More support needs to be added to kernel/time for
-                * counter/timer interrupts on multiple CPU's
-                */
-               write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
-
-               /*
-                * Other CPUs should do profiling and process accounting
-                */
-               local_timer_interrupt(irq, dev_id);
-       }
-out:
-#endif /* CONFIG_MIPS_MT_SMTC */
-       return IRQ_HANDLED;
-}
-
-/*
  * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect
  */
 static unsigned int __init estimate_cpu_frequency(void)
@@ -246,7 +144,7 @@ void __init plat_time_init(void)
        mips_scroll_message();
 }
 
-irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
+static irqreturn_t mips_perf_interrupt(int irq, void *dev_id)
 {
        return perf_irq();
 }
@@ -257,8 +155,10 @@ static struct irqaction perf_irqaction = {
        .name = "performance",
 };
 
-void __init plat_perf_setup(struct irqaction *irq)
+void __init plat_perf_setup(void)
 {
+       struct irqaction *irq = &perf_irqaction;
+
        cp0_perfcount_irq = -1;
 
 #ifdef MSC01E_INT_BASE
@@ -297,8 +197,6 @@ void __init plat_timer_setup(struct irqaction *irq)
                mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
        }
 
-       /* we are using the cpu counter for timer interrupts */
-       irq->handler = mips_timer_interrupt;    /* we use our own handler */
 #ifdef CONFIG_MIPS_MT_SMTC
        setup_irq_smtc(mips_cpu_timer_irq, irq, 0x100 << cp0_compare_irq);
 #else
@@ -308,5 +206,5 @@
        set_irq_handler(mips_cpu_timer_irq, handle_percpu_irq);
 #endif
 
-       plat_perf_setup(&perf_irqaction);
+       plat_perf_setup();
 }
diff --git a/arch/mips/mipssim/sim_time.c b/arch/mips/mipssim/sim_time.c
index 9a355e77952f..3625f7d49035 100644
--- a/arch/mips/mipssim/sim_time.c
+++ b/arch/mips/mipssim/sim_time.c
@@ -23,77 +23,6 @@
 
 unsigned long cpu_khz;
 
-irqreturn_t sim_timer_interrupt(int irq, void *dev_id)
-{
-#ifdef CONFIG_SMP
-       int cpu = smp_processor_id();
-
-       /*
-        * CPU 0 handles the global timer interrupt job
-        * resets count/compare registers to trigger next timer int.
-        */
-#ifndef CONFIG_MIPS_MT_SMTC
-       if (cpu == 0) {
-               timer_interrupt(irq, dev_id);
-       } else {
-               /* Everyone else needs to reset the timer int here as
-                  ll_local_timer_interrupt doesn't */
-               /*
-                * FIXME: need to cope with counter underflow.
-                * More support needs to be added to kernel/time for
-                * counter/timer interrupts on multiple CPU's
-                */
-               write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
-       }
-#else /* SMTC */
-       /*
-        * In SMTC system, one Count/Compare set exists per VPE.
-        * Which TC within a VPE gets the interrupt is essentially
-        * random - we only know that it shouldn't be one with
-        * IXMT set. Whichever TC gets the interrupt needs to
-        * send special interprocessor interrupts to the other
-        * TCs to make sure that they schedule, etc.
-        *
-        * That code is specific to the SMTC kernel, not to
-        * the simulation platform, so it's invoked from
-        * the general MIPS timer_interrupt routine.
-        *
-        * We have a problem in that the interrupt vector code
-        * had to turn off the timer IM bit to avoid redundant
-        * entries, but we may never get to mips_cpu_irq_end
-        * to turn it back on again if the scheduler gets
-        * involved.  So we clear the pending timer here,
-        * and re-enable the mask...
-        */
-
-       int vpflags = dvpe();
-       write_c0_compare (read_c0_count() - 1);
-       clear_c0_cause(0x100 << cp0_compare_irq);
-       set_c0_status(0x100 << cp0_compare_irq);
-       irq_enable_hazard();
-       evpe(vpflags);
-
-       if (cpu_data[cpu].vpe_id == 0)
-               timer_interrupt(irq, dev_id);
-       else
-               write_c0_compare (read_c0_count() + ( mips_hpt_frequency/HZ));
-       smtc_timer_broadcast(cpu_data[cpu].vpe_id);
-
-#endif /* CONFIG_MIPS_MT_SMTC */
-
-       /*
-        * every CPU should do profiling and process accounting
-        */
-       local_timer_interrupt (irq, dev_id);
-
-       return IRQ_HANDLED;
-#else
-       return timer_interrupt (irq, dev_id);
-#endif
-}
-
-
-/*
  * Estimate CPU frequency.  Sets mips_hpt_frequency as a side-effect
  */
 static unsigned int __init estimate_cpu_frequency(void)
@@ -185,7 +114,6 @@ void __init plat_timer_setup(struct irqaction *irq)
        }
 
        /* we are using the cpu counter for timer interrupts */
-       irq->handler = sim_timer_interrupt;
        setup_irq(mips_cpu_timer_irq, irq);
 
 #ifdef CONFIG_SMP
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c
index 18348321795d..338c0d706988 100644
--- a/arch/mips/sgi-ip22/ip22-int.c
+++ b/arch/mips/sgi-ip22/ip22-int.c
@@ -20,10 +20,10 @@
 #include <asm/mipsregs.h>
 #include <asm/addrspace.h>
 #include <asm/irq_cpu.h>
-
 #include <asm/sgi/ioc.h>
 #include <asm/sgi/hpc3.h>
 #include <asm/sgi/ip22.h>
+#include <asm/time.h>
 
 /* #define DEBUG_SGINT */
 
@@ -204,7 +204,6 @@ static struct irqaction map1_cascade = {
 #define SGI_INTERRUPTS SGINT_LOCAL3
 #endif
 
-extern void indy_r4k_timer_interrupt(void);
 extern void indy_8254timer_irq(void);
 
 /*
@@ -243,7 +242,7 @@ asmlinkage void plat_irq_dispatch(void)
         * First we check for r4k counter/timer IRQ.
         */
        if (pending & CAUSEF_IP7)
-               indy_r4k_timer_interrupt();
+               ll_timer_interrupt(SGI_TIMER_IRQ, NULL);
        else if (pending & CAUSEF_IP2)
                indy_local0_irqdispatch();
        else if (pending & CAUSEF_IP3)
diff --git a/arch/mips/sgi-ip22/ip22-time.c b/arch/mips/sgi-ip22/ip22-time.c
index c49418067f93..a1df1f9e26fa 100644
--- a/arch/mips/sgi-ip22/ip22-time.c
+++ b/arch/mips/sgi-ip22/ip22-time.c
@@ -189,16 +189,6 @@ void indy_8254timer_irq(void)
        irq_exit();
 }
 
-void indy_r4k_timer_interrupt(void)
-{
-       int irq = SGI_TIMER_IRQ;
-
-       irq_enter();
-       kstat_this_cpu.irqs[irq]++;
-       timer_interrupt(irq, NULL);
-       irq_exit();
-}
-
 void __init plat_timer_setup(struct irqaction *irq)
 {
        /* over-write the handler, we use our own way */
diff --git a/arch/mips/sibyte/bcm1480/time.c b/arch/mips/sibyte/bcm1480/time.c
index 6f3f71bf4244..8519091d848b 100644
--- a/arch/mips/sibyte/bcm1480/time.c
+++ b/arch/mips/sibyte/bcm1480/time.c
@@ -103,18 +103,7 @@ void bcm1480_timer_interrupt(void)
        __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS,
                     IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
 
-       if (cpu == 0) {
-               /*
-                * CPU 0 handles the global timer interrupt job
-                */
-               ll_timer_interrupt(irq);
-       }
-       else {
-               /*
-                * other CPUs should just do profiling and process accounting
-                */
-               ll_local_timer_interrupt(irq);
-       }
+       ll_timer_interrupt(irq);
 }
 
 static cycle_t bcm1480_hpt_read(void)
diff --git a/arch/mips/sibyte/sb1250/time.c b/arch/mips/sibyte/sb1250/time.c
index 2efffe15ff23..5bb83cd4c113 100644
--- a/arch/mips/sibyte/sb1250/time.c
+++ b/arch/mips/sibyte/sb1250/time.c
@@ -125,18 +125,7 @@ void sb1250_timer_interrupt(void)
        ____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
                       IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
 
-       if (cpu == 0) {
-               /*
-                * CPU 0 handles the global timer interrupt job
-                */
-               ll_timer_interrupt(irq);
-       }
-       else {
-               /*
-                * other CPUs should just do profiling and process accounting
-                */
-               ll_local_timer_interrupt(irq);
-       }
+       ll_timer_interrupt(irq);
 }
 
 /*
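
Note (not part of the commit): one piece of arithmetic that recurs in the consolidated handler, and in the per-board copies it removes, is the reprogramming done on CPUs other than CPU 0: write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ)) arms the next tick one jiffy's worth of count cycles ahead. The standalone sketch below is plain userspace C, not kernel code; the 100 MHz count frequency, HZ of 100, and count value are assumed numbers chosen only to walk through that arithmetic, including the 32-bit wrap.

#include <stdio.h>

int main(void)
{
        /* Assumed values for illustration only. */
        unsigned int mips_hpt_frequency = 100000000;    /* 100 MHz cp0 count clock */
        unsigned int hz = 100;                          /* tick rate */
        unsigned int count = 4294000000u;               /* pretend read_c0_count() value */

        /* One jiffy is mips_hpt_frequency / HZ count cycles: 1,000,000 here. */
        unsigned int next_compare = count + mips_hpt_frequency / hz;

        /* Unsigned arithmetic wraps modulo 2^32, matching the 32-bit count register. */
        printf("cycles per tick:    %u\n", mips_hpt_frequency / hz);
        printf("next compare value: %u\n", next_compare);
        return 0;
}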