author    | Thomas Gleixner <tglx@linutronix.de> | 2011-05-14 12:06:36 +0200
committer | Thomas Gleixner <tglx@linutronix.de> | 2011-05-14 12:06:36 +0200
commit    | a18f22a968de17b29f2310cdb7ba69163e65ec15 (patch)
tree      | a7d56d88fad5e444d7661484109758a2f436129e /arch/mips/kernel
parent    | blackfin: convert to clocksource_register_hz (diff)
parent    | clocksource: convert mips to generic i8253 clocksource (diff)
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts:
arch/ia64/kernel/cyclone.c
arch/mips/kernel/i8253.c
arch/x86/kernel/i8253.c
Reason: Resolve conflicts so further cleanups do not conflict further
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/mips/kernel')
28 files changed, 496 insertions, 583 deletions
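The point of the series being merged (per the branch name and the arch/mips/kernel/i8253.c hunk below) is that architectures stop carrying private copies of the PIT clocksource and register the shared i8253 one instead. Stripped of diff markers, the MIPS side reduces to roughly the following; this is a sketch of the post-merge state, assuming clocksource_i8253_init() is exposed through <linux/clocksource.h> as in kernels of this era:

```c
/*
 * Post-merge shape of the MIPS PIT clocksource hook (sketch, diff
 * markers removed): the arch only decides whether the shared i8253
 * clocksource is worth registering and delegates the rest.
 */
#include <linux/clocksource.h>
#include <linux/cpumask.h>
#include <linux/init.h>

static int __init init_pit_clocksource(void)
{
	if (num_possible_cpus() > 1)	/* PIT does not scale! */
		return 0;

	return clocksource_i8253_init();
}
arch_initcall(init_pit_clocksource);
```

The SMP bail-out stays: the PIT is a single global counter behind slow port I/O, so it is only worth registering on uniprocessor configurations.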
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index b8bb8ba60869..f305ca14351b 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -73,7 +73,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w, : "0" (5), "1" (8), "2" (5)); align_mod(align, mod); /* - * The trailing nop is needed to fullfill the two-instruction + * The trailing nop is needed to fulfill the two-instruction * requirement between reading hi/lo and staring a mult/div. * Leaving it out may cause gas insert a nop itself breaking * the desired alignment of the next chunk. diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 5a84a1f11231..94ca2b018af7 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c @@ -17,29 +17,13 @@ #include <asm/cacheflush.h> #include <asm/uasm.h> -/* - * If the Instruction Pointer is in module space (0xc0000000), return true; - * otherwise, it is in kernel space (0x80000000), return false. - * - * FIXME: This will not work when the kernel space and module space are the - * same. If they are the same, we need to modify scripts/recordmcount.pl, - * ftrace_make_nop/call() and the other related parts to ensure the - * enabling/disabling of the calling site to _mcount is right for both kernel - * and module. - */ - -static inline int in_module(unsigned long ip) -{ - return ip & 0x40000000; -} +#include <asm-generic/sections.h> #ifdef CONFIG_DYNAMIC_FTRACE #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ -#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */ -#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */ #define INSN_NOP 0x00000000 /* nop */ #define INSN_JAL(addr) \ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) @@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void) #endif } +/* + * Check if the address is in kernel space + * + * Clone core_kernel_text() from kernel/extable.c, but doesn't call + * init_kernel_text() for Ftrace doesn't trace functions in init sections. + */ +static inline int in_kernel_space(unsigned long ip) +{ + if (ip >= (unsigned long)_stext && + ip <= (unsigned long)_etext) + return 1; + return 0; +} + static int ftrace_modify_code(unsigned long ip, unsigned int new_code) { int faulted; @@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return 0; } +/* + * The details about the calling site of mcount on MIPS + * + * 1. For kernel: + * + * move at, ra + * jal _mcount --> nop + * + * 2. 
For modules: + * + * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * move $12, ra_address + * jalr v1 + * sub sp, sp, 8 + * 1: offset = 5 instructions + * 2.2 For the Other situations + * + * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) + * addiu v1, v1, low_16bit_of_mcount + * move at, ra + * jalr v1 + * nop | move $12, ra_address | sub sp, sp, 8 + * 1: offset = 4 instructions + */ + +#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) +#define MCOUNT_OFFSET_INSNS 5 +#else +#define MCOUNT_OFFSET_INSNS 4 +#endif +#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) + int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { @@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod, unsigned long ip = rec->ip; /* - * We have compiled module with -mlong-calls, but compiled the kernel - * without it, we need to cope with them respectively. + * If ip is in kernel space, no long call, otherwise, long call is + * needed. */ - if (in_module(ip)) { -#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) - /* - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) - * addiu v1, v1, low_16bit_of_mcount - * move at, ra - * move $12, ra_address - * jalr v1 - * sub sp, sp, 8 - * 1: offset = 5 instructions - */ - new = INSN_B_1F_5; -#else - /* - * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) - * addiu v1, v1, low_16bit_of_mcount - * move at, ra - * jalr v1 - * nop | move $12, ra_address | sub sp, sp, 8 - * 1: offset = 4 instructions - */ - new = INSN_B_1F_4; -#endif - } else { - /* - * move at, ra - * jal _mcount --> nop - */ - new = INSN_NOP; - } + new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; + return ftrace_modify_code(ip, new); } @@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) unsigned int new; unsigned long ip = rec->ip; - /* ip, module: 0xc0000000, kernel: 0x80000000 */ - new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; + new = in_kernel_space(ip) ? insn_jal_ftrace_caller : + insn_lui_v1_hi16_mcount; return ftrace_modify_code(ip, new); } @@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void) #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ -unsigned long ftrace_get_parent_addr(unsigned long self_addr, - unsigned long parent, - unsigned long parent_addr, - unsigned long fp) +unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long + old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) { - unsigned long sp, ip, ra; + unsigned long sp, ip, tmp; unsigned int code; int faulted; /* - * For module, move the ip from calling site of mcount to the - * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for - * kernel, move to the instruction "move ra, at"(offset is 12) + * For module, move the ip from the return address after the + * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for + * kernel, move after the instruction "move ra, at"(offset is 16) */ - ip = self_addr - (in_module(self_addr) ? 20 : 12); + ip = self_ra - (in_kernel_space(self_ra) ? 
16 : 24); /* * search the text until finding the non-store instruction or "s{d,w} * ra, offset(sp)" instruction */ do { - ip -= 4; - /* get the code at "ip": code = *(unsigned int *)ip; */ safe_load_code(code, ip, faulted); @@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * store the ra on the stack */ if ((code & S_R_SP) != S_R_SP) - return parent_addr; + return parent_ra_addr; - } while (((code & S_RA_SP) != S_RA_SP)); + /* Move to the next instruction */ + ip -= 4; + } while ((code & S_RA_SP) != S_RA_SP); sp = fp + (code & OFFSET_MASK); - /* ra = *(unsigned long *)sp; */ - safe_load_stack(ra, sp, faulted); + /* tmp = *(unsigned long *)sp; */ + safe_load_stack(tmp, sp, faulted); if (unlikely(faulted)) return 0; - if (ra == parent) + if (tmp == old_parent_ra) return sp; return 0; } @@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, * Hook the return address and push it in the stack of return addrs * in current thread info. */ -void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, +void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, unsigned long fp) { - unsigned long old; + unsigned long old_parent_ra; struct ftrace_graph_ent trace; unsigned long return_hooker = (unsigned long) &return_to_handler; - int faulted; + int faulted, insns; if (unlikely(atomic_read(¤t->tracing_graph_pause))) return; /* - * "parent" is the stack address saved the return address of the caller - * of _mcount. + * "parent_ra_addr" is the stack address saved the return address of + * the caller of _mcount. * * if the gcc < 4.5, a leaf function does not save the return address * in the stack address, so, we "emulate" one in _mcount's stack space, @@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, * do it in ftrace_graph_caller of mcount.S. */ - /* old = *parent; */ - safe_load_stack(old, parent, faulted); + /* old_parent_ra = *parent_ra_addr; */ + safe_load_stack(old_parent_ra, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; #ifndef KBUILD_MCOUNT_RA_ADDRESS - parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, - (unsigned long)parent, fp); + parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, + old_parent_ra, (unsigned long)parent_ra_addr, fp); /* * If fails when getting the stack address of the non-leaf function's * ra, stop function graph tracer and return */ - if (parent == 0) + if (parent_ra_addr == 0) goto out; #endif - /* *parent = return_hooker; */ - safe_store_stack(return_hooker, parent, faulted); + /* *parent_ra_addr = return_hooker; */ + safe_store_stack(return_hooker, parent_ra_addr, faulted); if (unlikely(faulted)) goto out; - if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == - -EBUSY) { - *parent = old; + if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp) + == -EBUSY) { + *parent_ra_addr = old_parent_ra; return; } - trace.func = self_addr; + /* + * Get the recorded ip of the current mcount calling site in the + * __mcount_loc section, which will be used to filter the function + * entries configured through the tracing/set_graph_function interface. + */ + + insns = in_kernel_space(self_ra) ? 
2 : MCOUNT_OFFSET_INSNS + 1; + trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); /* Only trace if the calling function expects to */ if (!ftrace_graph_entry(&trace)) { current->curr_ret_stack--; - *parent = old; + *parent_ra_addr = old_parent_ra; } return; out: diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c index 9fadd17888d9..391221b6a6aa 100644 --- a/arch/mips/kernel/i8253.c +++ b/arch/mips/kernel/i8253.c @@ -125,84 +125,11 @@ void __init setup_pit_timer(void) setup_irq(0, &irq0); } -/* - * Since the PIT overflows every tick, its not very useful - * to just read by itself. So use jiffies to emulate a free - * running counter: - */ -static cycle_t pit_read(struct clocksource *cs) -{ - unsigned long flags; - int count; - u32 jifs; - static int old_count; - static u32 old_jifs; - - raw_spin_lock_irqsave(&i8253_lock, flags); - /* - * Although our caller may have the read side of xtime_lock, - * this is now a seqlock, and we are cheating in this routine - * by having side effects on state that we cannot undo if - * there is a collision on the seqlock and our caller has to - * retry. (Namely, old_jifs and old_count.) So we must treat - * jiffies as volatile despite the lock. We read jiffies - * before latching the timer count to guarantee that although - * the jiffies value might be older than the count (that is, - * the counter may underflow between the last point where - * jiffies was incremented and the point where we latch the - * count), it cannot be newer. - */ - jifs = jiffies; - outb_p(0x00, PIT_MODE); /* latch the count ASAP */ - count = inb_p(PIT_CH0); /* read the latched count */ - count |= inb_p(PIT_CH0) << 8; - - /* VIA686a test code... reset the latch if count > max + 1 */ - if (count > LATCH) { - outb_p(0x34, PIT_MODE); - outb_p(LATCH & 0xff, PIT_CH0); - outb(LATCH >> 8, PIT_CH0); - count = LATCH - 1; - } - - /* - * It's possible for count to appear to go the wrong way for a - * couple of reasons: - * - * 1. The timer counter underflows, but we haven't handled the - * resulting interrupt and incremented jiffies yet. - * 2. Hardware problem with the timer, not giving us continuous time, - * the counter does small "jumps" upwards on some Pentium systems, - * (see c't 95/10 page 335 for Neptun bug.) - * - * Previous attempts to handle these cases intelligently were - * buggy, so we just do the simple thing now. - */ - if (count > old_count && jifs == old_jifs) { - count = old_count; - } - old_count = count; - old_jifs = jifs; - - raw_spin_unlock_irqrestore(&i8253_lock, flags); - - count = (LATCH - 1) - count; - - return (cycle_t)(jifs * LATCH) + count; -} - -static struct clocksource clocksource_pit = { - .name = "pit", - .rating = 110, - .read = pit_read, - .mask = CLOCKSOURCE_MASK(32), -}; - static int __init init_pit_clocksource(void) { if (num_possible_cpus() > 1) /* PIT does not scale! 
*/ return 0; - return clocksource_register_hz(&clocksource_pit, CLOCK_TICK_RATE); + return clocksource_i8253_init(); } arch_initcall(init_pit_clocksource); diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c index c58176cc796b..c018696765d4 100644 --- a/arch/mips/kernel/i8259.c +++ b/arch/mips/kernel/i8259.c @@ -31,19 +31,19 @@ static int i8259A_auto_eoi = -1; DEFINE_RAW_SPINLOCK(i8259A_lock); -static void disable_8259A_irq(unsigned int irq); -static void enable_8259A_irq(unsigned int irq); -static void mask_and_ack_8259A(unsigned int irq); +static void disable_8259A_irq(struct irq_data *d); +static void enable_8259A_irq(struct irq_data *d); +static void mask_and_ack_8259A(struct irq_data *d); static void init_8259A(int auto_eoi); static struct irq_chip i8259A_chip = { - .name = "XT-PIC", - .mask = disable_8259A_irq, - .disable = disable_8259A_irq, - .unmask = enable_8259A_irq, - .mask_ack = mask_and_ack_8259A, + .name = "XT-PIC", + .irq_mask = disable_8259A_irq, + .irq_disable = disable_8259A_irq, + .irq_unmask = enable_8259A_irq, + .irq_mask_ack = mask_and_ack_8259A, #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF - .set_affinity = plat_set_irq_affinity, + .irq_set_affinity = plat_set_irq_affinity, #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ }; @@ -59,12 +59,11 @@ static unsigned int cached_irq_mask = 0xffff; #define cached_master_mask (cached_irq_mask) #define cached_slave_mask (cached_irq_mask >> 8) -static void disable_8259A_irq(unsigned int irq) +static void disable_8259A_irq(struct irq_data *d) { - unsigned int mask; + unsigned int mask, irq = d->irq - I8259A_IRQ_BASE; unsigned long flags; - irq -= I8259A_IRQ_BASE; mask = 1 << irq; raw_spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask |= mask; @@ -75,12 +74,11 @@ static void disable_8259A_irq(unsigned int irq) raw_spin_unlock_irqrestore(&i8259A_lock, flags); } -static void enable_8259A_irq(unsigned int irq) +static void enable_8259A_irq(struct irq_data *d) { - unsigned int mask; + unsigned int mask, irq = d->irq - I8259A_IRQ_BASE; unsigned long flags; - irq -= I8259A_IRQ_BASE; mask = ~(1 << irq); raw_spin_lock_irqsave(&i8259A_lock, flags); cached_irq_mask &= mask; @@ -112,7 +110,7 @@ int i8259A_irq_pending(unsigned int irq) void make_8259A_irq(unsigned int irq) { disable_irq_nosync(irq); - set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq); + irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq); enable_irq(irq); } @@ -145,12 +143,11 @@ static inline int i8259A_irq_real(unsigned int irq) * first, _then_ send the EOI, and the order of EOI * to the two 8259s is important! */ -static void mask_and_ack_8259A(unsigned int irq) +static void mask_and_ack_8259A(struct irq_data *d) { - unsigned int irqmask; + unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE; unsigned long flags; - irq -= I8259A_IRQ_BASE; irqmask = 1 << irq; raw_spin_lock_irqsave(&i8259A_lock, flags); /* @@ -290,9 +287,9 @@ static void init_8259A(int auto_eoi) * In AEOI mode we just have to mask the interrupt * when acking. 
*/ - i8259A_chip.mask_ack = disable_8259A_irq; + i8259A_chip.irq_mask_ack = disable_8259A_irq; else - i8259A_chip.mask_ack = mask_and_ack_8259A; + i8259A_chip.irq_mask_ack = mask_and_ack_8259A; udelay(100); /* wait for 8259A to initialize */ @@ -339,8 +336,8 @@ void __init init_i8259_irqs(void) init_8259A(0); for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) { - set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq); - set_irq_probe(i); + irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq); + irq_set_probe(i); } setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2); diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c index 1774271af848..0c527f652196 100644 --- a/arch/mips/kernel/irq-gic.c +++ b/arch/mips/kernel/irq-gic.c @@ -87,17 +87,10 @@ unsigned int gic_get_int(void) return i; } -static unsigned int gic_irq_startup(unsigned int irq) +static void gic_irq_ack(struct irq_data *d) { - irq -= _irqbase; - pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); - GIC_SET_INTR_MASK(irq); - return 0; -} + unsigned int irq = d->irq - _irqbase; -static void gic_irq_ack(unsigned int irq) -{ - irq -= _irqbase; pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); GIC_CLR_INTR_MASK(irq); @@ -105,16 +98,16 @@ static void gic_irq_ack(unsigned int irq) GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq); } -static void gic_mask_irq(unsigned int irq) +static void gic_mask_irq(struct irq_data *d) { - irq -= _irqbase; + unsigned int irq = d->irq - _irqbase; pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); GIC_CLR_INTR_MASK(irq); } -static void gic_unmask_irq(unsigned int irq) +static void gic_unmask_irq(struct irq_data *d) { - irq -= _irqbase; + unsigned int irq = d->irq - _irqbase; pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq); GIC_SET_INTR_MASK(irq); } @@ -123,13 +116,14 @@ static void gic_unmask_irq(unsigned int irq) static DEFINE_SPINLOCK(gic_lock); -static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) +static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, + bool force) { + unsigned int irq = d->irq - _irqbase; cpumask_t tmp = CPU_MASK_NONE; unsigned long flags; int i; - irq -= _irqbase; pr_debug("%s(%d) called\n", __func__, irq); cpumask_and(&tmp, cpumask, cpu_online_mask); if (cpus_empty(tmp)) @@ -147,23 +141,22 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask) set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask); } - cpumask_copy(irq_desc[irq].affinity, cpumask); + cpumask_copy(d->affinity, cpumask); spin_unlock_irqrestore(&gic_lock, flags); - return 0; + return IRQ_SET_MASK_OK_NOCOPY; } #endif static struct irq_chip gic_irq_controller = { - .name = "MIPS GIC", - .startup = gic_irq_startup, - .ack = gic_irq_ack, - .mask = gic_mask_irq, - .mask_ack = gic_mask_irq, - .unmask = gic_unmask_irq, - .eoi = gic_unmask_irq, + .name = "MIPS GIC", + .irq_ack = gic_irq_ack, + .irq_mask = gic_mask_irq, + .irq_mask_ack = gic_mask_irq, + .irq_unmask = gic_unmask_irq, + .irq_eoi = gic_unmask_irq, #ifdef CONFIG_SMP - .set_affinity = gic_set_affinity, + .irq_set_affinity = gic_set_affinity, #endif }; @@ -236,7 +229,7 @@ static void __init gic_basic_init(int numintrs, int numvpes, vpe_local_setup(numvpes); for (i = _irqbase; i < (_irqbase + numintrs); i++) - set_irq_chip(i, &gic_irq_controller); + irq_set_chip(i, &gic_irq_controller); } void __init gic_init(unsigned long gic_base_addr, diff --git a/arch/mips/kernel/irq-gt641xx.c 
b/arch/mips/kernel/irq-gt641xx.c index 42ef81461bfc..883fc6cead36 100644 --- a/arch/mips/kernel/irq-gt641xx.c +++ b/arch/mips/kernel/irq-gt641xx.c @@ -29,64 +29,64 @@ static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock); -static void ack_gt641xx_irq(unsigned int irq) +static void ack_gt641xx_irq(struct irq_data *d) { unsigned long flags; u32 cause; raw_spin_lock_irqsave(>641xx_irq_lock, flags); cause = GT_READ(GT_INTRCAUSE_OFS); - cause &= ~GT641XX_IRQ_TO_BIT(irq); + cause &= ~GT641XX_IRQ_TO_BIT(d->irq); GT_WRITE(GT_INTRCAUSE_OFS, cause); raw_spin_unlock_irqrestore(>641xx_irq_lock, flags); } -static void mask_gt641xx_irq(unsigned int irq) +static void mask_gt641xx_irq(struct irq_data *d) { unsigned long flags; u32 mask; raw_spin_lock_irqsave(>641xx_irq_lock, flags); mask = GT_READ(GT_INTRMASK_OFS); - mask &= ~GT641XX_IRQ_TO_BIT(irq); + mask &= ~GT641XX_IRQ_TO_BIT(d->irq); GT_WRITE(GT_INTRMASK_OFS, mask); raw_spin_unlock_irqrestore(>641xx_irq_lock, flags); } -static void mask_ack_gt641xx_irq(unsigned int irq) +static void mask_ack_gt641xx_irq(struct irq_data *d) { unsigned long flags; u32 cause, mask; raw_spin_lock_irqsave(>641xx_irq_lock, flags); mask = GT_READ(GT_INTRMASK_OFS); - mask &= ~GT641XX_IRQ_TO_BIT(irq); + mask &= ~GT641XX_IRQ_TO_BIT(d->irq); GT_WRITE(GT_INTRMASK_OFS, mask); cause = GT_READ(GT_INTRCAUSE_OFS); - cause &= ~GT641XX_IRQ_TO_BIT(irq); + cause &= ~GT641XX_IRQ_TO_BIT(d->irq); GT_WRITE(GT_INTRCAUSE_OFS, cause); raw_spin_unlock_irqrestore(>641xx_irq_lock, flags); } -static void unmask_gt641xx_irq(unsigned int irq) +static void unmask_gt641xx_irq(struct irq_data *d) { unsigned long flags; u32 mask; raw_spin_lock_irqsave(>641xx_irq_lock, flags); mask = GT_READ(GT_INTRMASK_OFS); - mask |= GT641XX_IRQ_TO_BIT(irq); + mask |= GT641XX_IRQ_TO_BIT(d->irq); GT_WRITE(GT_INTRMASK_OFS, mask); raw_spin_unlock_irqrestore(>641xx_irq_lock, flags); } static struct irq_chip gt641xx_irq_chip = { .name = "GT641xx", - .ack = ack_gt641xx_irq, - .mask = mask_gt641xx_irq, - .mask_ack = mask_ack_gt641xx_irq, - .unmask = unmask_gt641xx_irq, + .irq_ack = ack_gt641xx_irq, + .irq_mask = mask_gt641xx_irq, + .irq_mask_ack = mask_ack_gt641xx_irq, + .irq_unmask = unmask_gt641xx_irq, }; void gt641xx_irq_dispatch(void) @@ -126,6 +126,6 @@ void __init gt641xx_irq_init(void) * bit31: logical or of bits[25:1]. 
*/ for (i = 1; i < 30; i++) - set_irq_chip_and_handler(GT641XX_IRQ_BASE + i, - >641xx_irq_chip, handle_level_irq); + irq_set_chip_and_handler(GT641XX_IRQ_BASE + i, + >641xx_irq_chip, handle_level_irq); } diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c index 6a8cd28133d5..0c6afeed89d2 100644 --- a/arch/mips/kernel/irq-msc01.c +++ b/arch/mips/kernel/irq-msc01.c @@ -28,8 +28,10 @@ static unsigned long _icctrl_msc; static unsigned int irq_base; /* mask off an interrupt */ -static inline void mask_msc_irq(unsigned int irq) +static inline void mask_msc_irq(struct irq_data *d) { + unsigned int irq = d->irq; + if (irq < (irq_base + 32)) MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base)); else @@ -37,8 +39,10 @@ static inline void mask_msc_irq(unsigned int irq) } /* unmask an interrupt */ -static inline void unmask_msc_irq(unsigned int irq) +static inline void unmask_msc_irq(struct irq_data *d) { + unsigned int irq = d->irq; + if (irq < (irq_base + 32)) MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base)); else @@ -48,9 +52,11 @@ static inline void unmask_msc_irq(unsigned int irq) /* * Masks and ACKs an IRQ */ -static void level_mask_and_ack_msc_irq(unsigned int irq) +static void level_mask_and_ack_msc_irq(struct irq_data *d) { - mask_msc_irq(irq); + unsigned int irq = d->irq; + + mask_msc_irq(d); if (!cpu_has_veic) MSCIC_WRITE(MSC01_IC_EOI, 0); /* This actually needs to be a call into platform code */ @@ -60,9 +66,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq) /* * Masks and ACKs an IRQ */ -static void edge_mask_and_ack_msc_irq(unsigned int irq) +static void edge_mask_and_ack_msc_irq(struct irq_data *d) { - mask_msc_irq(irq); + unsigned int irq = d->irq; + + mask_msc_irq(d); if (!cpu_has_veic) MSCIC_WRITE(MSC01_IC_EOI, 0); else { @@ -75,15 +83,6 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq) } /* - * End IRQ processing - */ -static void end_msc_irq(unsigned int irq) -{ - if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) - unmask_msc_irq(irq); -} - -/* * Interrupt handler for interrupts coming from SOC-it. 
*/ void ll_msc_irq(void) @@ -107,22 +106,20 @@ static void msc_bind_eic_interrupt(int irq, int set) static struct irq_chip msc_levelirq_type = { .name = "SOC-it-Level", - .ack = level_mask_and_ack_msc_irq, - .mask = mask_msc_irq, - .mask_ack = level_mask_and_ack_msc_irq, - .unmask = unmask_msc_irq, - .eoi = unmask_msc_irq, - .end = end_msc_irq, + .irq_ack = level_mask_and_ack_msc_irq, + .irq_mask = mask_msc_irq, + .irq_mask_ack = level_mask_and_ack_msc_irq, + .irq_unmask = unmask_msc_irq, + .irq_eoi = unmask_msc_irq, }; static struct irq_chip msc_edgeirq_type = { .name = "SOC-it-Edge", - .ack = edge_mask_and_ack_msc_irq, - .mask = mask_msc_irq, - .mask_ack = edge_mask_and_ack_msc_irq, - .unmask = unmask_msc_irq, - .eoi = unmask_msc_irq, - .end = end_msc_irq, + .irq_ack = edge_mask_and_ack_msc_irq, + .irq_mask = mask_msc_irq, + .irq_mask_ack = edge_mask_and_ack_msc_irq, + .irq_unmask = unmask_msc_irq, + .irq_eoi = unmask_msc_irq, }; @@ -140,16 +137,20 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma switch (imp->im_type) { case MSC01_IRQ_EDGE: - set_irq_chip_and_handler_name(irqbase + n, - &msc_edgeirq_type, handle_edge_irq, "edge"); + irq_set_chip_and_handler_name(irqbase + n, + &msc_edgeirq_type, + handle_edge_irq, + "edge"); if (cpu_has_veic) MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); else MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); break; case MSC01_IRQ_LEVEL: - set_irq_chip_and_handler_name(irqbase+n, - &msc_levelirq_type, handle_level_irq, "level"); + irq_set_chip_and_handler_name(irqbase + n, + &msc_levelirq_type, + handle_level_irq, + "level"); if (cpu_has_veic) MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); else diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c index 9731e8b47862..a8a8977d5887 100644 --- a/arch/mips/kernel/irq-rm7000.c +++ b/arch/mips/kernel/irq-rm7000.c @@ -18,23 +18,23 @@ #include <asm/mipsregs.h> #include <asm/system.h> -static inline void unmask_rm7k_irq(unsigned int irq) +static inline void unmask_rm7k_irq(struct irq_data *d) { - set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); + set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); } -static inline void mask_rm7k_irq(unsigned int irq) +static inline void mask_rm7k_irq(struct irq_data *d) { - clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE)); + clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); } static struct irq_chip rm7k_irq_controller = { .name = "RM7000", - .ack = mask_rm7k_irq, - .mask = mask_rm7k_irq, - .mask_ack = mask_rm7k_irq, - .unmask = unmask_rm7k_irq, - .eoi = unmask_rm7k_irq + .irq_ack = mask_rm7k_irq, + .irq_mask = mask_rm7k_irq, + .irq_mask_ack = mask_rm7k_irq, + .irq_unmask = unmask_rm7k_irq, + .irq_eoi = unmask_rm7k_irq }; void __init rm7k_cpu_irq_init(void) @@ -45,6 +45,6 @@ void __init rm7k_cpu_irq_init(void) clear_c0_intcontrol(0x00000f00); /* Mask all */ for (i = base; i < base + 4; i++) - set_irq_chip_and_handler(i, &rm7k_irq_controller, + irq_set_chip_and_handler(i, &rm7k_irq_controller, handle_percpu_irq); } diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index b7e4025b58a8..38874a4b9255 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c @@ -19,22 +19,22 @@ #include <asm/mipsregs.h> #include <asm/system.h> -static inline void unmask_rm9k_irq(unsigned int irq) +static inline void unmask_rm9k_irq(struct irq_data *d) { - set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); + set_c0_intcontrol(0x1000 << (d->irq - 
RM9K_CPU_IRQ_BASE)); } -static inline void mask_rm9k_irq(unsigned int irq) +static inline void mask_rm9k_irq(struct irq_data *d) { - clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE)); + clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE)); } -static inline void rm9k_cpu_irq_enable(unsigned int irq) +static inline void rm9k_cpu_irq_enable(struct irq_data *d) { unsigned long flags; local_irq_save(flags); - unmask_rm9k_irq(irq); + unmask_rm9k_irq(d); local_irq_restore(flags); } @@ -43,50 +43,47 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq) */ static void local_rm9k_perfcounter_irq_startup(void *args) { - unsigned int irq = (unsigned int) args; - - rm9k_cpu_irq_enable(irq); + rm9k_cpu_irq_enable(args); } -static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) +static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d) { - on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1); + on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1); return 0; } static void local_rm9k_perfcounter_irq_shutdown(void *args) { - unsigned int irq = (unsigned int) args; unsigned long flags; local_irq_save(flags); - mask_rm9k_irq(irq); + mask_rm9k_irq(args); local_irq_restore(flags); } -static void rm9k_perfcounter_irq_shutdown(unsigned int irq) +static void rm9k_perfcounter_irq_shutdown(struct irq_data *d) { - on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1); + on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1); } static struct irq_chip rm9k_irq_controller = { .name = "RM9000", - .ack = mask_rm9k_irq, - .mask = mask_rm9k_irq, - .mask_ack = mask_rm9k_irq, - .unmask = unmask_rm9k_irq, - .eoi = unmask_rm9k_irq + .irq_ack = mask_rm9k_irq, + .irq_mask = mask_rm9k_irq, + .irq_mask_ack = mask_rm9k_irq, + .irq_unmask = unmask_rm9k_irq, + .irq_eoi = unmask_rm9k_irq }; static struct irq_chip rm9k_perfcounter_irq = { .name = "RM9000", - .startup = rm9k_perfcounter_irq_startup, - .shutdown = rm9k_perfcounter_irq_shutdown, - .ack = mask_rm9k_irq, - .mask = mask_rm9k_irq, - .mask_ack = mask_rm9k_irq, - .unmask = unmask_rm9k_irq, + .irq_startup = rm9k_perfcounter_irq_startup, + .irq_shutdown = rm9k_perfcounter_irq_shutdown, + .irq_ack = mask_rm9k_irq, + .irq_mask = mask_rm9k_irq, + .irq_mask_ack = mask_rm9k_irq, + .irq_unmask = unmask_rm9k_irq, }; unsigned int rm9000_perfcount_irq; @@ -101,10 +98,10 @@ void __init rm9k_cpu_irq_init(void) clear_c0_intcontrol(0x0000f000); /* Mask all */ for (i = base; i < base + 4; i++) - set_irq_chip_and_handler(i, &rm9k_irq_controller, + irq_set_chip_and_handler(i, &rm9k_irq_controller, handle_level_irq); rm9000_perfcount_irq = base + 1; - set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, + irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq, handle_percpu_irq); } diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 4f93db58a79e..9b734d74ae8e 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -81,48 +81,9 @@ void ack_bad_irq(unsigned int irq) atomic_t irq_err_count; -/* - * Generic, controller-independent functions: - */ - -int show_interrupts(struct seq_file *p, void *v) +int arch_show_interrupts(struct seq_file *p, int prec) { - int i = *(loff_t *) v, j; - struct irqaction * action; - unsigned long flags; - - if (i == 0) { - seq_printf(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ", j); - seq_putc(p, '\n'); - } - - if (i < NR_IRQS) { - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); - action = irq_desc[i].action; - if (!action) - goto 
skip; - seq_printf(p, "%3d: ", i); -#ifndef CONFIG_SMP - seq_printf(p, "%10u ", kstat_irqs(i)); -#else - for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); -#endif - seq_printf(p, " %14s", irq_desc[i].chip->name); - seq_printf(p, " %s", action->name); - - for (action=action->next; action; action = action->next) - seq_printf(p, ", %s", action->name); - - seq_putc(p, '\n'); -skip: - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); - } else if (i == NR_IRQS) { - seq_putc(p, '\n'); - seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); - } + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); return 0; } @@ -141,7 +102,7 @@ void __init init_IRQ(void) #endif for (i = 0; i < NR_IRQS; i++) - set_irq_noprobe(i); + irq_set_noprobe(i); arch_init_irq(); @@ -183,8 +144,8 @@ void __irq_entry do_IRQ(unsigned int irq) { irq_enter(); check_stack_overflow(); - __DO_IRQ_SMTC_HOOK(irq); - generic_handle_irq(irq); + if (!smtc_handle_on_other_cpu(irq)) + generic_handle_irq(irq); irq_exit(); } @@ -197,7 +158,7 @@ void __irq_entry do_IRQ(unsigned int irq) void __irq_entry do_IRQ_no_affinity(unsigned int irq) { irq_enter(); - __NO_AFFINITY_IRQ_SMTC_HOOK(irq); + smtc_im_backstop(irq); generic_handle_irq(irq); irq_exit(); } diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c index 0262abe09121..6e71b284f6c9 100644 --- a/arch/mips/kernel/irq_cpu.c +++ b/arch/mips/kernel/irq_cpu.c @@ -37,42 +37,38 @@ #include <asm/mipsmtregs.h> #include <asm/system.h> -static inline void unmask_mips_irq(unsigned int irq) +static inline void unmask_mips_irq(struct irq_data *d) { - set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); + set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); irq_enable_hazard(); } -static inline void mask_mips_irq(unsigned int irq) +static inline void mask_mips_irq(struct irq_data *d) { - clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE)); + clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); irq_disable_hazard(); } static struct irq_chip mips_cpu_irq_controller = { .name = "MIPS", - .ack = mask_mips_irq, - .mask = mask_mips_irq, - .mask_ack = mask_mips_irq, - .unmask = unmask_mips_irq, - .eoi = unmask_mips_irq, + .irq_ack = mask_mips_irq, + .irq_mask = mask_mips_irq, + .irq_mask_ack = mask_mips_irq, + .irq_unmask = unmask_mips_irq, + .irq_eoi = unmask_mips_irq, }; /* * Basically the same as above but taking care of all the MT stuff */ -#define unmask_mips_mt_irq unmask_mips_irq -#define mask_mips_mt_irq mask_mips_irq - -static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) +static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d) { unsigned int vpflags = dvpe(); - clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); + clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); evpe(vpflags); - unmask_mips_mt_irq(irq); - + unmask_mips_irq(d); return 0; } @@ -80,22 +76,22 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq) * While we ack the interrupt interrupts are disabled and thus we don't need * to deal with concurrency issues. Same for mips_cpu_irq_end. 
*/ -static void mips_mt_cpu_irq_ack(unsigned int irq) +static void mips_mt_cpu_irq_ack(struct irq_data *d) { unsigned int vpflags = dvpe(); - clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE)); + clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE)); evpe(vpflags); - mask_mips_mt_irq(irq); + mask_mips_irq(d); } static struct irq_chip mips_mt_cpu_irq_controller = { .name = "MIPS", - .startup = mips_mt_cpu_irq_startup, - .ack = mips_mt_cpu_irq_ack, - .mask = mask_mips_mt_irq, - .mask_ack = mips_mt_cpu_irq_ack, - .unmask = unmask_mips_mt_irq, - .eoi = unmask_mips_mt_irq, + .irq_startup = mips_mt_cpu_irq_startup, + .irq_ack = mips_mt_cpu_irq_ack, + .irq_mask = mask_mips_irq, + .irq_mask_ack = mips_mt_cpu_irq_ack, + .irq_unmask = unmask_mips_irq, + .irq_eoi = unmask_mips_irq, }; void __init mips_cpu_irq_init(void) @@ -113,10 +109,10 @@ void __init mips_cpu_irq_init(void) */ if (cpu_has_mipsmt) for (i = irq_base; i < irq_base + 2; i++) - set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller, + irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller, handle_percpu_irq); for (i = irq_base + 2; i < irq_base + 8; i++) - set_irq_chip_and_handler(i, &mips_cpu_irq_controller, + irq_set_chip_and_handler(i, &mips_cpu_irq_controller, handle_percpu_irq); } diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c index 95a96f69172d..b0c55b50218e 100644 --- a/arch/mips/kernel/irq_txx9.c +++ b/arch/mips/kernel/irq_txx9.c @@ -63,9 +63,9 @@ static struct { unsigned char mode; } txx9irq[TXx9_MAX_IR] __read_mostly; -static void txx9_irq_unmask(unsigned int irq) +static void txx9_irq_unmask(struct irq_data *d) { - unsigned int irq_nr = irq - TXX9_IRQ_BASE; + unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2]; int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; @@ -79,9 +79,9 @@ static void txx9_irq_unmask(unsigned int irq) #endif } -static inline void txx9_irq_mask(unsigned int irq) +static inline void txx9_irq_mask(struct irq_data *d) { - unsigned int irq_nr = irq - TXX9_IRQ_BASE; + unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2]; int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; @@ -99,19 +99,19 @@ static inline void txx9_irq_mask(unsigned int irq) #endif } -static void txx9_irq_mask_ack(unsigned int irq) +static void txx9_irq_mask_ack(struct irq_data *d) { - unsigned int irq_nr = irq - TXX9_IRQ_BASE; + unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; - txx9_irq_mask(irq); + txx9_irq_mask(d); /* clear edge detection */ if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode))) __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr); } -static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type) +static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type) { - unsigned int irq_nr = irq - TXX9_IRQ_BASE; + unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; u32 cr; u32 __iomem *crp; int ofs; @@ -139,11 +139,11 @@ static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type) static struct irq_chip txx9_irq_chip = { .name = "TXX9", - .ack = txx9_irq_mask_ack, - .mask = txx9_irq_mask, - .mask_ack = txx9_irq_mask_ack, - .unmask = txx9_irq_unmask, - .set_type = txx9_irq_set_type, + .irq_ack = txx9_irq_mask_ack, + .irq_mask = txx9_irq_mask, + .irq_mask_ack = txx9_irq_mask_ack, + .irq_unmask = txx9_irq_unmask, + .irq_set_type = txx9_irq_set_type, }; void __init txx9_irq_init(unsigned long baseaddr) @@ -154,8 +154,8 @@ void __init txx9_irq_init(unsigned long baseaddr) for (i = 0; i < 
TXx9_MAX_IR; i++) { txx9irq[i].level = 4; /* middle level */ txx9irq[i].mode = TXx9_IRCR_LOW; - set_irq_chip_and_handler(TXX9_IRQ_BASE + i, - &txx9_irq_chip, handle_level_irq); + irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip, + handle_level_irq); } /* mask all IRC interrupts */ diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 2b7f3f703b83..a8244854d3dc 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c @@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event, return ret; } -static int mipspmu_enable(struct perf_event *event) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx; - int err = 0; - - /* To look for a free counter for this event. */ - idx = mipspmu->alloc_counter(cpuc, hwc); - if (idx < 0) { - err = idx; - goto out; - } - - /* - * If there is an event in the counter we are going to use then - * make sure it is disabled. - */ - event->hw.idx = idx; - mipspmu->disable_event(idx); - cpuc->events[idx] = event; - - /* Set the period for the event. */ - mipspmu_event_set_period(event, hwc, idx); - - /* Enable the event. */ - mipspmu->enable_event(hwc, idx); - - /* Propagate our changes to the userspace mapping. */ - perf_event_update_userpage(event); - -out: - return err; -} - static void mipspmu_event_update(struct perf_event *event, struct hw_perf_event *hwc, int idx) @@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event, unsigned long flags; int shift = 64 - TOTAL_BITS; s64 prev_raw_count, new_raw_count; - s64 delta; + u64 delta; again: prev_raw_count = local64_read(&hwc->prev_count); @@ -231,32 +196,90 @@ again: return; } -static void mipspmu_disable(struct perf_event *event) +static void mipspmu_start(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!mipspmu) + return; + + if (flags & PERF_EF_RELOAD) + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); + + hwc->state = 0; + + /* Set the period for the event. */ + mipspmu_event_set_period(event, hwc, hwc->idx); + + /* Enable the event. */ + mipspmu->enable_event(hwc, hwc->idx); +} + +static void mipspmu_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hwc = &event->hw; + + if (!mipspmu) + return; + + if (!(hwc->state & PERF_HES_STOPPED)) { + /* We are working on a local event. */ + mipspmu->disable_event(hwc->idx); + barrier(); + mipspmu_event_update(event, hwc, hwc->idx); + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; + } +} + +static int mipspmu_add(struct perf_event *event, int flags) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; + int idx; + int err = 0; + perf_pmu_disable(event->pmu); - WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + /* To look for a free counter for this event. */ + idx = mipspmu->alloc_counter(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } - /* We are working on a local event. */ + /* + * If there is an event in the counter we are going to use then + * make sure it is disabled. + */ + event->hw.idx = idx; mipspmu->disable_event(idx); + cpuc->events[idx] = event; - barrier(); - - mipspmu_event_update(event, hwc, idx); - cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; + if (flags & PERF_EF_START) + mipspmu_start(event, PERF_EF_RELOAD); + /* Propagate our changes to the userspace mapping. 
*/ perf_event_update_userpage(event); + +out: + perf_pmu_enable(event->pmu); + return err; } -static void mipspmu_unthrottle(struct perf_event *event) +static void mipspmu_del(struct perf_event *event, int flags) { + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; - mipspmu->enable_event(hwc, hwc->idx); + WARN_ON(idx < 0 || idx >= mipspmu->num_counters); + + mipspmu_stop(event, PERF_EF_UPDATE); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); } static void mipspmu_read(struct perf_event *event) @@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event) mipspmu_event_update(event, hwc, hwc->idx); } -static struct pmu pmu = { - .enable = mipspmu_enable, - .disable = mipspmu_disable, - .unthrottle = mipspmu_unthrottle, - .read = mipspmu_read, -}; +static void mipspmu_enable(struct pmu *pmu) +{ + if (mipspmu) + mipspmu->start(); +} + +static void mipspmu_disable(struct pmu *pmu) +{ + if (mipspmu) + mipspmu->stop(); +} static atomic_t active_events = ATOMIC_INIT(0); static DEFINE_MUTEX(pmu_reserve_mutex); @@ -318,6 +346,82 @@ static void mipspmu_free_irq(void) perf_irq = save_perf_irq; } +/* + * mipsxx/rm9000/loongson2 have different performance counters, they have + * specific low-level init routines. + */ +static void reset_counters(void *arg); +static int __hw_perf_event_init(struct perf_event *event); + +static void hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, + &pmu_reserve_mutex)) { + /* + * We must not call the destroy function with interrupts + * disabled. + */ + on_each_cpu(reset_counters, + (void *)(long)mipspmu->num_counters, 1); + mipspmu_free_irq(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static int mipspmu_event_init(struct perf_event *event) +{ + int err = 0; + + switch (event->attr.type) { + case PERF_TYPE_RAW: + case PERF_TYPE_HARDWARE: + case PERF_TYPE_HW_CACHE: + break; + + default: + return -ENOENT; + } + + if (!mipspmu || event->cpu >= nr_cpumask_bits || + (event->cpu >= 0 && !cpu_online(event->cpu))) + return -ENODEV; + + if (!atomic_inc_not_zero(&active_events)) { + if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { + atomic_dec(&active_events); + return -ENOSPC; + } + + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) + err = mipspmu_get_irq(); + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return err; + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err; +} + +static struct pmu pmu = { + .pmu_enable = mipspmu_enable, + .pmu_disable = mipspmu_disable, + .event_init = mipspmu_event_init, + .add = mipspmu_add, + .del = mipspmu_del, + .start = mipspmu_start, + .stop = mipspmu_stop, + .read = mipspmu_read, +}; + static inline unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) { @@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc, { struct hw_perf_event fake_hwc = event->hw; - if (event->pmu && event->pmu != &pmu) - return 0; + /* Allow mixed event group. So return 1 to pass validation. 
*/ + if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) + return 1; return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; } @@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event) return 0; } -/* - * mipsxx/rm9000/loongson2 have different performance counters, they have - * specific low-level init routines. - */ -static void reset_counters(void *arg); -static int __hw_perf_event_init(struct perf_event *event); - -static void hw_perf_event_destroy(struct perf_event *event) -{ - if (atomic_dec_and_mutex_lock(&active_events, - &pmu_reserve_mutex)) { - /* - * We must not call the destroy function with interrupts - * disabled. - */ - on_each_cpu(reset_counters, - (void *)(long)mipspmu->num_counters, 1); - mipspmu_free_irq(); - mutex_unlock(&pmu_reserve_mutex); - } -} - -const struct pmu *hw_perf_event_init(struct perf_event *event) -{ - int err = 0; - - if (!mipspmu || event->cpu >= nr_cpumask_bits || - (event->cpu >= 0 && !cpu_online(event->cpu))) - return ERR_PTR(-ENODEV); - - if (!atomic_inc_not_zero(&active_events)) { - if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { - atomic_dec(&active_events); - return ERR_PTR(-ENOSPC); - } - - mutex_lock(&pmu_reserve_mutex); - if (atomic_read(&active_events) == 0) - err = mipspmu_get_irq(); - - if (!err) - atomic_inc(&active_events); - mutex_unlock(&pmu_reserve_mutex); - } - - if (err) - return ERR_PTR(err); - - err = __hw_perf_event_init(event); - if (err) - hw_perf_event_destroy(event); - - return err ? ERR_PTR(err) : &pmu; -} - -void hw_perf_enable(void) -{ - if (mipspmu) - mipspmu->start(); -} - -void hw_perf_disable(void) -{ - if (mipspmu) - mipspmu->stop(); -} - /* This is needed by specific irq handlers in perf_event_*.c */ static void handle_associated_event(struct cpu_hw_events *cpuc, @@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc, #include "perf_event_mipsxx.c" /* Callchain handling code. */ -static inline void -callchain_store(struct perf_callchain_entry *entry, - u64 ip) -{ - if (entry->nr < PERF_MAX_STACK_DEPTH) - entry->ip[entry->nr++] = ip; -} /* * Leave userspace callchain empty for now. When we find a way to trace * the user stack callchains, we add here. 
*/ -static void -perf_callchain_user(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void perf_callchain_user(struct perf_callchain_entry *entry, + struct pt_regs *regs) { } @@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry, while (!kstack_end(sp)) { addr = *sp++; if (__kernel_text_address(addr)) { - callchain_store(entry, addr); + perf_callchain_store(entry, addr); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; } } } -static void -perf_callchain_kernel(struct pt_regs *regs, - struct perf_callchain_entry *entry) +void perf_callchain_kernel(struct perf_callchain_entry *entry, + struct pt_regs *regs) { unsigned long sp = regs->regs[29]; #ifdef CONFIG_KALLSYMS unsigned long ra = regs->regs[31]; unsigned long pc = regs->cp0_epc; - callchain_store(entry, PERF_CONTEXT_KERNEL); if (raw_show_trace || !__kernel_text_address(pc)) { unsigned long stack_page = (unsigned long)task_stack_page(current); @@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs, return; } do { - callchain_store(entry, pc); + perf_callchain_store(entry, pc); if (entry->nr >= PERF_MAX_STACK_DEPTH) break; pc = unwind_stack(current, &sp, pc, &ra); } while (pc); #else - callchain_store(entry, PERF_CONTEXT_KERNEL); save_raw_perf_callchain(entry, sp); #endif } - -static void -perf_do_callchain(struct pt_regs *regs, - struct perf_callchain_entry *entry) -{ - int is_user; - - if (!regs) - return; - - is_user = user_mode(regs); - - if (!current || !current->pid) - return; - - if (is_user && current->state != TASK_RUNNING) - return; - - if (!is_user) { - perf_callchain_kernel(regs, entry); - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; - } - if (regs) - perf_callchain_user(regs, entry); -} - -static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); - -struct perf_callchain_entry * -perf_callchain(struct pt_regs *regs) -{ - struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); - - entry->nr = 0; - perf_do_callchain(regs, entry); - return entry; -} diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 183e0d226669..75266ff4cc33 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c @@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void) * interrupt, not NMI. */ if (handled == IRQ_HANDLED) - perf_event_do_pending(); + irq_work_run(); #ifdef CONFIG_MIPS_MT_SMP read_unlock(&pmuint_rwlock); @@ -721,7 +721,7 @@ static void mipsxx_pmu_start(void) /* * MIPS performance counters can be per-TC. The control registers can - * not be directly accessed accross CPUs. Hence if we want to do global + * not be directly accessed across CPUs. Hence if we want to do global * control, we need cross CPU calls. on_each_cpu() can help us, but we * can not make sure this function is called with interrupts enabled. So * here we pause local counters and then grab a rwlock and leave the @@ -1045,6 +1045,8 @@ init_hw_perf_events(void) "CPU, irq %d%s\n", mipspmu->name, counters, irq, irq < 0 ? 
" (share with timer interrupt)" : ""); + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + return 0; } early_initcall(init_hw_perf_events); diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index ae167df73ddd..d2112d3cf115 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -410,7 +410,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) return 0; /* - * Return ra if an exception occured at the first instruction + * Return ra if an exception occurred at the first instruction */ if (unlikely(ofs == 0)) { pc = *ra; diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index fbaabad0e6e2..7f5468b38d4c 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -586,6 +586,10 @@ einval: li v0, -ENOSYS sys sys_fanotify_init 2 sys sys_fanotify_mark 6 sys sys_prlimit64 4 + sys sys_name_to_handle_at 5 + sys sys_open_by_handle_at 3 /* 4340 */ + sys sys_clock_adjtime 2 + sys sys_syncfs 1 .endm /* We pre-compute the number of _instruction_ bytes needed to diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 3f4179283207..a2e1fcbc41dc 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -425,4 +425,8 @@ sys_call_table: PTR sys_fanotify_init /* 5295 */ PTR sys_fanotify_mark PTR sys_prlimit64 + PTR sys_name_to_handle_at + PTR sys_open_by_handle_at + PTR sys_clock_adjtime /* 5300 */ + PTR sys_syncfs .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f08ece6d8acc..b2c7624995b8 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -425,4 +425,8 @@ EXPORT(sysn32_call_table) PTR sys_fanotify_init /* 6300 */ PTR sys_fanotify_mark PTR sys_prlimit64 + PTR sys_name_to_handle_at + PTR sys_open_by_handle_at + PTR compat_sys_clock_adjtime /* 6305 */ + PTR sys_syncfs .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 78d768a3e19d..049a9c8c49a0 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -543,4 +543,8 @@ sys_call_table: PTR sys_fanotify_init PTR sys_32_fanotify_mark PTR sys_prlimit64 + PTR sys_name_to_handle_at + PTR compat_sys_open_by_handle_at /* 4340 */ + PTR compat_sys_clock_adjtime + PTR sys_syncfs .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 5922342bca39..dbbe0ce48d89 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c @@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc) static int protected_restore_fp_context(struct sigcontext __user *sc) { - int err, tmp; + int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); own_fpu_inatomic(0); diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index a0ed0e052b2e..aae986613795 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c @@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) static int protected_restore_fp_context32(struct sigcontext32 __user *sc) { - int err, tmp; + int err, tmp __maybe_unused; while (1) { lock_fpu_owner(); own_fpu_inatomic(0); diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index c0e81418ba21..1ec56e635d04 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -120,7 +120,7 @@ static 
void vsmp_send_ipi_single(int cpu, unsigned int action) local_irq_save(flags); - vpflags = dvpe(); /* cant access the other CPU's registers whilst MVPE enabled */ + vpflags = dvpe(); /* can't access the other CPU's registers whilst MVPE enabled */ switch (action) { case SMP_CALL_FUNCTION: diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 383aeb95cb49..32a256101082 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c @@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void) */ static struct task_struct *cpu_idle_thread[NR_CPUS]; +struct create_idle { + struct work_struct work; + struct task_struct *idle; + struct completion done; + int cpu; +}; + +static void __cpuinit do_fork_idle(struct work_struct *work) +{ + struct create_idle *c_idle = + container_of(work, struct create_idle, work); + + c_idle->idle = fork_idle(c_idle->cpu); + complete(&c_idle->done); +} + int __cpuinit __cpu_up(unsigned int cpu) { struct task_struct *idle; @@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu) * Linux can schedule processes on this slave. */ if (!cpu_idle_thread[cpu]) { - idle = fork_idle(cpu); - cpu_idle_thread[cpu] = idle; + /* + * Schedule work item to avoid forking user task + * Ported from arch/x86/kernel/smpboot.c + */ + struct create_idle c_idle = { + .cpu = cpu, + .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), + }; + + INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); + schedule_work(&c_idle.work); + wait_for_completion(&c_idle.done); + idle = cpu_idle_thread[cpu] = c_idle.idle; if (IS_ERR(idle)) panic(KERN_ERR "Fork failed for CPU %d", cpu); diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 39c08254b0f1..5a88cc4ccd5a 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c @@ -677,8 +677,9 @@ void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity) */ } -void smtc_forward_irq(unsigned int irq) +void smtc_forward_irq(struct irq_data *d) { + unsigned int irq = d->irq; int target; /* @@ -692,7 +693,7 @@ void smtc_forward_irq(unsigned int irq) * and efficiency, we just pick the easiest one to find. 
*/ - target = cpumask_first(irq_desc[irq].affinity); + target = cpumask_first(d->affinity); /* * We depend on the platform code to have correctly processed @@ -707,12 +708,10 @@ void smtc_forward_irq(unsigned int irq) */ /* If no one is eligible, service locally */ - if (target >= NR_CPUS) { + if (target >= NR_CPUS) do_IRQ_no_affinity(irq); - return; - } - - smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); + else + smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq); } #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */ @@ -1147,7 +1146,7 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); - set_irq_handler(cpu_ipi_irq, handle_percpu_irq); + irq_set_handler(cpu_ipi_irq, handle_percpu_irq); } /* diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dc6edff45e0..58beabf50b3c 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -383,12 +383,11 @@ save_static_function(sys_sysmips); static int __used noinline _sys_sysmips(nabi_no_regargs struct pt_regs regs) { - long cmd, arg1, arg2, arg3; + long cmd, arg1, arg2; cmd = regs.regs[4]; arg1 = regs.regs[5]; arg2 = regs.regs[6]; - arg3 = regs.regs[7]; switch (cmd) { case MIPS_ATOMIC_SET: @@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs) if (arg1 & 2) set_thread_flag(TIF_LOGADE); else - clear_thread_flag(TIF_FIXADE); + clear_thread_flag(TIF_LOGADE); return 0; diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c index fb7497405510..1083ad4e1017 100644 --- a/arch/mips/kernel/time.c +++ b/arch/mips/kernel/time.c @@ -102,7 +102,7 @@ static __init int cpu_has_mfc0_count_bug(void) case CPU_R4400SC: case CPU_R4400MC: /* - * The published errata for the R4400 upto 3.0 say the CPU + * The published errata for the R4400 up to 3.0 say the CPU * has the mfc0 from count bug. */ if ((current_cpu_data.processor_id & 0xff) <= 0x30) diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S index 570607b376b5..832afbb87588 100644 --- a/arch/mips/kernel/vmlinux.lds.S +++ b/arch/mips/kernel/vmlinux.lds.S @@ -115,7 +115,7 @@ SECTIONS EXIT_DATA } - PERCPU(PAGE_SIZE) + PERCPU(1 << CONFIG_MIPS_L1_CACHE_SHIFT, PAGE_SIZE) . = ALIGN(PAGE_SIZE); __init_end = .; /* freed after init ends here */ diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6a1fdfef8fde..dbb6b408f001 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c @@ -19,7 +19,7 @@ * VPE support module * * Provides support for loading a MIPS SP program on VPE1. - * The SP enviroment is rather simple, no tlb's. It needs to be relocatable + * The SP environment is rather simple, no tlb's. It needs to be relocatable * (or partially linked). You should initialise your stack in the startup * code. This loader looks for the symbol __start and sets up * execution to resume from there. The MIPS SDE kit contains suitable examples. @@ -148,9 +148,9 @@ struct { spinlock_t tc_list_lock; struct list_head tc_list; /* Thread contexts */ } vpecontrol = { - .vpe_list_lock = SPIN_LOCK_UNLOCKED, + .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), - .tc_list_lock = SPIN_LOCK_UNLOCKED, + .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) }; |
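Apart from the ftrace and perf rework, most hunks above are one mechanical conversion: irq_chip callbacks take struct irq_data * instead of a bare irq number, the .mask/.unmask/.ack/.set_affinity fields are renamed to their .irq_* counterparts, and set_irq_chip_and_handler()/set_irq_probe() become irq_set_chip_and_handler()/irq_set_probe(). A stripped-down sketch of the converted shape follows; every demo_* name and the register stand-in are hypothetical, for illustration only:

```c
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>

/* Hypothetical controller: an 8-line chip with a simple mask register. */
#define DEMO_IRQ_BASE	0
#define DEMO_NR_IRQS	8

static u32 demo_irq_mask_reg;		/* stand-in for a controller register */

static void demo_mask_irq(struct irq_data *d)
{
	/* The controller-local line is recovered from irq_data, not passed in. */
	demo_irq_mask_reg &= ~(1u << (d->irq - DEMO_IRQ_BASE));
}

static void demo_unmask_irq(struct irq_data *d)
{
	demo_irq_mask_reg |= 1u << (d->irq - DEMO_IRQ_BASE);
}

static struct irq_chip demo_irq_chip = {
	.name		= "DEMO",
	/* old .ack/.mask/.mask_ack/.unmask become the .irq_* callbacks */
	.irq_ack	= demo_mask_irq,
	.irq_mask	= demo_mask_irq,
	.irq_mask_ack	= demo_mask_irq,
	.irq_unmask	= demo_unmask_irq,
	.irq_eoi	= demo_unmask_irq,
};

static void __init demo_irq_init(void)
{
	int i;

	/* set_irq_chip_and_handler() is spelled irq_set_chip_and_handler() now. */
	for (i = DEMO_IRQ_BASE; i < DEMO_IRQ_BASE + DEMO_NR_IRQS; i++)
		irq_set_chip_and_handler(i, &demo_irq_chip, handle_level_irq);
}
```

The handlers derive the line from d->irq minus the chip's base, which is exactly the transformation the i8259, GT641xx, RM7000/RM9000 and TXx9 conversions above apply.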