| author | Michal Marek <mmarek@suse.cz> | 2010-08-04 13:59:13 +0200 |
| --- | --- | --- |
| committer | Michal Marek <mmarek@suse.cz> | 2010-08-04 13:59:13 +0200 |
| commit | 772320e84588dcbe1600ffb83e5f328f2209ac2a (patch) | |
| tree | a7de21b79340aeaa17c58126f6b801b82c77b53a /arch/arm/kernel | |
| parent | modpost: support objects with more than 64k sections (diff) | |
| parent | Linux 2.6.35 (diff) | |
| download | linux-772320e84588dcbe1600ffb83e5f328f2209ac2a.tar.xz linux-772320e84588dcbe1600ffb83e5f328f2209ac2a.zip | |
Merge commit 'v2.6.35' into kbuild/kbuild
Conflicts:
arch/powerpc/Makefile
Diffstat (limited to 'arch/arm/kernel')
30 files changed, 3734 insertions, 590 deletions
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile index dd00f747e2ad..26d302c28e13 100644 --- a/arch/arm/kernel/Makefile +++ b/arch/arm/kernel/Makefile @@ -17,6 +17,7 @@ obj-y := compat.o elf.o entry-armv.o entry-common.o irq.o \ process.o ptrace.o return_address.o setup.o signal.o \ sys_arm.o stacktrace.o time.o traps.o +obj-$(CONFIG_LEDS) += leds.o obj-$(CONFIG_OC_ETM) += etm.o obj-$(CONFIG_ISA_DMA_API) += dma.o @@ -46,6 +47,8 @@ obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o obj-$(CONFIG_IWMMXT) += iwmmxt.o +obj-$(CONFIG_CPU_HAS_PMU) += pmu.o +obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt ifneq ($(CONFIG_ARCH_EBSA110),y) diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 4a881258bb17..883511522fca 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -12,6 +12,7 @@ */ #include <linux/sched.h> #include <linux/mm.h> +#include <linux/dma-mapping.h> #include <asm/mach/arch.h> #include <asm/thread_info.h> #include <asm/memory.h> @@ -112,5 +113,9 @@ int main(void) #ifdef MULTI_PABORT DEFINE(PROCESSOR_PABT_FUNC, offsetof(struct processor, _prefetch_abort)); #endif + BLANK(); + DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); + DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); + DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); return 0; } diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c index 809681900ec8..c6273a3bfc25 100644 --- a/arch/arm/kernel/bios32.c +++ b/arch/arm/kernel/bios32.c @@ -527,6 +527,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw) if (!sys) panic("PCI: unable to allocate sys data!"); +#ifdef CONFIG_PCI_DOMAINS + sys->domain = hw->domain; +#endif sys->hw = hw; sys->busnr = busnr; sys->swizzle = hw->swizzle; @@ -616,15 +619,17 @@ char * __init pcibios_setup(char *str) * but we want to try to avoid allocating at 0x2900-0x2bff * which might be mirrored at 0x0100-0x03ff.. 
*/ -void pcibios_align_resource(void *data, struct resource *res, - resource_size_t size, resource_size_t align) +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) { resource_size_t start = res->start; if (res->flags & IORESOURCE_IO && start & 0x300) start = (start + 0x3ff) & ~0x3ff; - res->start = (start + align - 1) & ~(align - 1); + start = (start + align - 1) & ~(align - 1); + + return start; } /** diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S index 9314a2d681f1..37ae301cc47c 100644 --- a/arch/arm/kernel/calls.S +++ b/arch/arm/kernel/calls.S @@ -91,7 +91,7 @@ CALL(sys_settimeofday) /* 80 */ CALL(sys_getgroups16) CALL(sys_setgroups16) - CALL(OBSOLETE(old_select)) /* used by libc4 */ + CALL(OBSOLETE(sys_old_select)) /* used by libc4 */ CALL(sys_symlink) CALL(sys_ni_syscall) /* was sys_lstat */ /* 85 */ CALL(sys_readlink) @@ -99,7 +99,7 @@ CALL(sys_swapon) CALL(sys_reboot) CALL(OBSOLETE(sys_old_readdir)) /* used by libc4 */ -/* 90 */ CALL(OBSOLETE(old_mmap)) /* used by libc4 */ +/* 90 */ CALL(OBSOLETE(sys_old_mmap)) /* used by libc4 */ CALL(sys_munmap) CALL(sys_truncate) CALL(sys_ftruncate) diff --git a/arch/arm/kernel/debug.S b/arch/arm/kernel/debug.S index 5c91addcaebc..a38b4879441d 100644 --- a/arch/arm/kernel/debug.S +++ b/arch/arm/kernel/debug.S @@ -24,7 +24,7 @@ #if defined(CONFIG_CPU_V6) - .macro addruart, rx + .macro addruart, rx, tmp .endm .macro senduart, rd, rx @@ -51,7 +51,7 @@ #elif defined(CONFIG_CPU_V7) - .macro addruart, rx + .macro addruart, rx, tmp .endm .macro senduart, rd, rx @@ -71,7 +71,7 @@ wait: mrc p14, 0, pc, c0, c1, 0 #elif defined(CONFIG_CPU_XSCALE) - .macro addruart, rx + .macro addruart, rx, tmp .endm .macro senduart, rd, rx @@ -98,7 +98,7 @@ wait: mrc p14, 0, pc, c0, c1, 0 #else - .macro addruart, rx + .macro addruart, rx, tmp .endm .macro senduart, rd, rx @@ -164,7 +164,7 @@ ENDPROC(printhex2) .ltorg ENTRY(printascii) - addruart r3 + addruart r3, r1 b 2f 1: waituart r2, r3 senduart r1, r3 @@ -180,7 +180,7 @@ ENTRY(printascii) ENDPROC(printascii) ENTRY(printch) - addruart r3 + addruart r3, r1 mov r1, r0 mov r0, #0 b 1b diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c index 7d5b9fb01e71..2c4a185f92cd 100644 --- a/arch/arm/kernel/dma.c +++ b/arch/arm/kernel/dma.c @@ -16,6 +16,8 @@ #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/scatterlist.h> +#include <linux/seq_file.h> +#include <linux/proc_fs.h> #include <asm/dma.h> @@ -264,3 +266,37 @@ int get_dma_residue(unsigned int chan) return ret; } EXPORT_SYMBOL(get_dma_residue); + +#ifdef CONFIG_PROC_FS +static int proc_dma_show(struct seq_file *m, void *v) +{ + int i; + + for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) { + dma_t *dma = dma_channel(i); + if (dma && dma->lock) + seq_printf(m, "%2d: %s\n", i, dma->device_id); + } + return 0; +} + +static int proc_dma_open(struct inode *inode, struct file *file) +{ + return single_open(file, proc_dma_show, NULL); +} + +static const struct file_operations proc_dma_operations = { + .open = proc_dma_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int __init proc_dma_init(void) +{ + proc_create("dma", 0, NULL, &proc_dma_operations); + return 0; +} + +__initcall(proc_dma_init); +#endif diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 6c5cf369183b..3fd7861de4d1 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -162,8 +162,6 @@ ENDPROC(__und_invalid) @ r4 - 
orig_r0 (see pt_regs definition in ptrace.h) @ stmia r5, {r0 - r4} - - asm_trace_hardirqs_off .endm .align 5 @@ -204,7 +202,7 @@ __dabt_svc: @ @ IRQs off again before pulling preserved data off the stack @ - disable_irq + disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -218,6 +216,9 @@ ENDPROC(__dabt_svc) __irq_svc: svc_entry +#ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_off +#endif #ifdef CONFIG_PREEMPT get_thread_info tsk ldr r8, [tsk, #TI_PREEMPT] @ get preempt count @@ -291,7 +292,7 @@ __und_svc: @ @ IRQs off again before pulling preserved data off the stack @ -1: disable_irq +1: disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -327,7 +328,7 @@ __pabt_svc: @ @ IRQs off again before pulling preserved data off the stack @ - disable_irq + disable_irq_notrace @ @ restore SPSR and restart the instruction @@ -393,8 +394,6 @@ ENDPROC(__pabt_svc) @ Clear FP to mark the first stack frame @ zero_fp - - asm_trace_hardirqs_off .endm .macro kuser_cmpxchg_check @@ -465,9 +464,6 @@ __irq_usr: THUMB( movne r0, #0 ) THUMB( strne r0, [r0] ) #endif -#ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on -#endif mov why, #0 b ret_to_user @@ -523,16 +519,16 @@ ENDPROC(__und_usr) /* * The out of line fixup for the ldrt above. */ - .section .fixup, "ax" + .pushsection .fixup, "ax" 4: mov pc, r9 - .previous - .section __ex_table,"a" + .popsection + .pushsection __ex_table,"a" .long 1b, 4b #if __LINUX_ARM_ARCH__ >= 7 .long 2b, 4b .long 3b, 4b #endif - .previous + .popsection /* * Check whether the instruction is a co-processor instruction. @@ -676,10 +672,10 @@ do_fpe: * lr = unrecognised FP instruction return address */ - .data + .pushsection .data ENTRY(fp_enter) .word no_fp - .previous + .popsection ENTRY(no_fp) mov pc, lr diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 7e9ed1eea40a..d93f976fb389 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -102,6 +102,8 @@ .else ldmdb sp, {r0 - lr}^ @ get calling r0 - lr .endif + mov r0, r0 @ ARMv5T and earlier require a nop + @ after ldm {}^ add sp, sp, #S_FRAME_SIZE - S_PC movs pc, lr @ return & move spsr_svc into cpsr .endm diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index c63842766229..0298286ad4ad 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c @@ -62,15 +62,15 @@ int ftrace_modify_code(unsigned long pc, unsigned char *old_code, " movne %0, #2 \n" "3:\n" - ".section .fixup, \"ax\"\n" + ".pushsection .fixup, \"ax\"\n" "4: mov %0, #1 \n" " b 3b \n" - ".previous\n" + ".popsection\n" - ".section __ex_table, \"a\"\n" + ".pushsection __ex_table, \"a\"\n" " .long 1b, 4b \n" " .long 2b, 4b \n" - ".previous\n" + ".popsection\n" : "=r"(err), "=r"(replaced) : "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced) diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index b7cb45bb91e8..3b3d2c80509c 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -27,7 +27,6 @@ #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/irq.h> -#include <linux/slab.h> #include <linux/random.h> #include <linux/smp.h> #include <linux/init.h> diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c index ba8ccfede964..c868a8864117 100644 --- a/arch/arm/kernel/kgdb.c +++ b/arch/arm/kernel/kgdb.c @@ -9,6 +9,7 @@ * Authors: George Davis <davis_g@mvista.com> * Deepak Saxena <dsaxena@plexity.net> */ +#include <linux/irq.h> #include <linux/kgdb.h> #include <asm/traps.h> @@ -97,6 +98,11 @@ 
sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task) gdb_regs[_CPSR] = thread_regs->ARM_cpsr; } +void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) +{ + regs->ARM_pc = pc; +} + static int compiled_break; int kgdb_arch_handle_exception(int exception_vector, int signo, @@ -158,6 +164,18 @@ static struct undef_hook kgdb_compiled_brkpt_hook = { .fn = kgdb_compiled_brk_fn }; +static void kgdb_call_nmi_hook(void *ignored) +{ + kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); +} + +void kgdb_roundup_cpus(unsigned long flags) +{ + local_irq_enable(); + smp_call_function(kgdb_call_nmi_hook, NULL, 0); + local_irq_disable(); +} + /** * kgdb_arch_init - Perform any architecture specific initalization. * diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index da1f94906a4e..8bccbfa693ff 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c @@ -583,13 +583,14 @@ static void __kprobes emulate_ldr(struct kprobe *p, struct pt_regs *regs) { insn_llret_3arg_fn_t *i_fn = (insn_llret_3arg_fn_t *)&p->ainsn.insn[0]; kprobe_opcode_t insn = p->opcode; + long ppc = (long)p->addr + 8; union reg_pair fnr; int rd = (insn >> 12) & 0xf; int rn = (insn >> 16) & 0xf; int rm = insn & 0xf; long rdv; - long rnv = regs->uregs[rn]; - long rmv = regs->uregs[rm]; /* rm/rmv may be invalid, don't care. */ + long rnv = (rn == 15) ? ppc : regs->uregs[rn]; + long rmv = (rm == 15) ? ppc : regs->uregs[rm]; long cpsr = regs->ARM_cpsr; fnr.dr = insnslot_llret_3arg_rflags(rnv, 0, rmv, cpsr, i_fn); diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 60c62c377fa9..2ba7deb3072e 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -22,6 +22,7 @@ #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/module.h> +#include <linux/slab.h> #include <linux/stop_machine.h> #include <linux/stringify.h> #include <asm/traps.h> @@ -393,6 +394,14 @@ void __kprobes jprobe_return(void) /* * Setup an empty pt_regs. Fill SP and PC fields as * they're needed by longjmp_break_handler. + * + * We allocate some slack between the original SP and start of + * our fabricated regs. To be precise we want to have worst case + * covered which is STMFD with all 16 regs so we allocate 2 * + * sizeof(struct_pt_regs)). + * + * This is to prevent any simulated instruction from writing + * over the regs when they are accessing the stack. */ "sub sp, %0, %1 \n\t" "ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t" @@ -410,7 +419,7 @@ void __kprobes jprobe_return(void) "ldmia sp, {r0 - pc} \n\t" : : "r" (kcb->jprobe_saved_regs.ARM_sp), - "I" (sizeof(struct pt_regs)), + "I" (sizeof(struct pt_regs) * 2), "J" (offsetof(struct pt_regs, ARM_sp)), "J" (offsetof(struct pt_regs, ARM_pc)), "J" (offsetof(struct pt_regs, ARM_cpsr)) diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c new file mode 100644 index 000000000000..31a316c1777b --- /dev/null +++ b/arch/arm/kernel/leds.c @@ -0,0 +1,115 @@ +/* + * LED support code, ripped out of arch/arm/kernel/time.c + * + * Copyright (C) 1994-2001 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/module.h> +#include <linux/init.h> +#include <linux/sysdev.h> + +#include <asm/leds.h> + +static void dummy_leds_event(led_event_t evt) +{ +} + +void (*leds_event)(led_event_t) = dummy_leds_event; + +struct leds_evt_name { + const char name[8]; + int on; + int off; +}; + +static const struct leds_evt_name evt_names[] = { + { "amber", led_amber_on, led_amber_off }, + { "blue", led_blue_on, led_blue_off }, + { "green", led_green_on, led_green_off }, + { "red", led_red_on, led_red_off }, +}; + +static ssize_t leds_store(struct sys_device *dev, + struct sysdev_attribute *attr, + const char *buf, size_t size) +{ + int ret = -EINVAL, len = strcspn(buf, " "); + + if (len > 0 && buf[len] == '\0') + len--; + + if (strncmp(buf, "claim", len) == 0) { + leds_event(led_claim); + ret = size; + } else if (strncmp(buf, "release", len) == 0) { + leds_event(led_release); + ret = size; + } else { + int i; + + for (i = 0; i < ARRAY_SIZE(evt_names); i++) { + if (strlen(evt_names[i].name) != len || + strncmp(buf, evt_names[i].name, len) != 0) + continue; + if (strncmp(buf+len, " on", 3) == 0) { + leds_event(evt_names[i].on); + ret = size; + } else if (strncmp(buf+len, " off", 4) == 0) { + leds_event(evt_names[i].off); + ret = size; + } + break; + } + } + return ret; +} + +static SYSDEV_ATTR(event, 0200, NULL, leds_store); + +static int leds_suspend(struct sys_device *dev, pm_message_t state) +{ + leds_event(led_stop); + return 0; +} + +static int leds_resume(struct sys_device *dev) +{ + leds_event(led_start); + return 0; +} + +static int leds_shutdown(struct sys_device *dev) +{ + leds_event(led_halted); + return 0; +} + +static struct sysdev_class leds_sysclass = { + .name = "leds", + .shutdown = leds_shutdown, + .suspend = leds_suspend, + .resume = leds_resume, +}; + +static struct sys_device leds_device = { + .id = 0, + .cls = &leds_sysclass, +}; + +static int __init leds_init(void) +{ + int ret; + ret = sysdev_class_register(&leds_sysclass); + if (ret == 0) + ret = sysdev_register(&leds_device); + if (ret == 0) + ret = sysdev_create_file(&leds_device, &attr_event); + return ret; +} + +device_initcall(leds_init); + +EXPORT_SYMBOL(leds_event); diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index f28c5e9c51ea..c628bdf6c430 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -16,9 +16,9 @@ #include <linux/mm.h> #include <linux/elf.h> #include <linux/vmalloc.h> -#include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> +#include <linux/gfp.h> #include <asm/pgtable.h> #include <asm/sections.h> diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c new file mode 100644 index 000000000000..de12536d687f --- /dev/null +++ b/arch/arm/kernel/perf_event.c @@ -0,0 +1,3133 @@ +#undef DEBUG + +/* + * ARM performance counter support. + * + * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles + * + * ARMv7 support: Jean Pihet <jpihet@mvista.com> + * 2010 (c) MontaVista Software, LLC. + * + * This code is based on the sparc64 perf event code, which is in turn based + * on the x86 code. Callchain code is based on the ARM OProfile backtrace + * code. 
+ */ +#define pr_fmt(fmt) "hw perfevents: " fmt + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> + +#include <asm/cputype.h> +#include <asm/irq.h> +#include <asm/irq_regs.h> +#include <asm/pmu.h> +#include <asm/stacktrace.h> + +static struct platform_device *pmu_device; + +/* + * Hardware lock to serialize accesses to PMU registers. Needed for the + * read/modify/write sequences. + */ +DEFINE_SPINLOCK(pmu_lock); + +/* + * ARMv6 supports a maximum of 3 events, starting from index 1. If we add + * another platform that supports more, we need to increase this to be the + * largest of all platforms. + * + * ARMv7 supports up to 32 events: + * cycle counter CCNT + 31 events counters CNT0..30. + * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. + */ +#define ARMPMU_MAX_HWEVENTS 33 + +/* The events for a given CPU. */ +struct cpu_hw_events { + /* + * The events that are active on the CPU for the given index. Index 0 + * is reserved. + */ + struct perf_event *events[ARMPMU_MAX_HWEVENTS]; + + /* + * A 1 bit for an index indicates that the counter is being used for + * an event. A 0 means that the counter can be used. + */ + unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; + + /* + * A 1 bit for an index indicates that the counter is actively being + * used. + */ + unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; +}; +DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); + +/* PMU names. */ +static const char *arm_pmu_names[] = { + [ARM_PERF_PMU_ID_XSCALE1] = "xscale1", + [ARM_PERF_PMU_ID_XSCALE2] = "xscale2", + [ARM_PERF_PMU_ID_V6] = "v6", + [ARM_PERF_PMU_ID_V6MP] = "v6mpcore", + [ARM_PERF_PMU_ID_CA8] = "ARMv7 Cortex-A8", + [ARM_PERF_PMU_ID_CA9] = "ARMv7 Cortex-A9", +}; + +struct arm_pmu { + enum arm_perf_pmu_ids id; + irqreturn_t (*handle_irq)(int irq_num, void *dev); + void (*enable)(struct hw_perf_event *evt, int idx); + void (*disable)(struct hw_perf_event *evt, int idx); + int (*event_map)(int evt); + u64 (*raw_event)(u64); + int (*get_event_idx)(struct cpu_hw_events *cpuc, + struct hw_perf_event *hwc); + u32 (*read_counter)(int idx); + void (*write_counter)(int idx, u32 val); + void (*start)(void); + void (*stop)(void); + int num_events; + u64 max_period; +}; + +/* Set at runtime when we know what CPU type we are. 
*/ +static const struct arm_pmu *armpmu; + +enum arm_perf_pmu_ids +armpmu_get_pmu_id(void) +{ + int id = -ENODEV; + + if (armpmu != NULL) + id = armpmu->id; + + return id; +} +EXPORT_SYMBOL_GPL(armpmu_get_pmu_id); + +int +armpmu_get_max_events(void) +{ + int max_events = 0; + + if (armpmu != NULL) + max_events = armpmu->num_events; + + return max_events; +} +EXPORT_SYMBOL_GPL(armpmu_get_max_events); + +#define HW_OP_UNSUPPORTED 0xFFFF + +#define C(_x) \ + PERF_COUNT_HW_CACHE_##_x + +#define CACHE_OP_UNSUPPORTED 0xFFFF + +static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX]; + +static int +armpmu_map_cache_event(u64 config) +{ + unsigned int cache_type, cache_op, cache_result, ret; + + cache_type = (config >> 0) & 0xff; + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) + return -EINVAL; + + cache_op = (config >> 8) & 0xff; + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) + return -EINVAL; + + cache_result = (config >> 16) & 0xff; + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) + return -EINVAL; + + ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result]; + + if (ret == CACHE_OP_UNSUPPORTED) + return -ENOENT; + + return ret; +} + +static int +armpmu_event_set_period(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + s64 left = atomic64_read(&hwc->period_left); + s64 period = hwc->sample_period; + int ret = 0; + + if (unlikely(left <= -period)) { + left = period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (unlikely(left <= 0)) { + left += period; + atomic64_set(&hwc->period_left, left); + hwc->last_period = period; + ret = 1; + } + + if (left > (s64)armpmu->max_period) + left = armpmu->max_period; + + atomic64_set(&hwc->prev_count, (u64)-left); + + armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); + + perf_event_update_userpage(event); + + return ret; +} + +static u64 +armpmu_event_update(struct perf_event *event, + struct hw_perf_event *hwc, + int idx) +{ + int shift = 64 - 32; + s64 prev_raw_count, new_raw_count; + u64 delta; + +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + new_raw_count = armpmu->read_counter(idx); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + atomic64_add(delta, &event->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + +static void +armpmu_disable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx = hwc->idx; + + WARN_ON(idx < 0); + + clear_bit(idx, cpuc->active_mask); + armpmu->disable(hwc, idx); + + barrier(); + + armpmu_event_update(event, hwc, idx); + cpuc->events[idx] = NULL; + clear_bit(idx, cpuc->used_mask); + + perf_event_update_userpage(event); +} + +static void +armpmu_read(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + + armpmu_event_update(event, hwc, hwc->idx); +} + +static void +armpmu_unthrottle(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + + /* + * Set the period again. Some counters can't be stopped, so when we + * were throttled we simply disabled the IRQ source and the counter + * may have been left counting. 
If we don't do this step then we may + * get an interrupt too soon or *way* too late if the overflow has + * happened since disabling. + */ + armpmu_event_set_period(event, hwc, hwc->idx); + armpmu->enable(hwc, hwc->idx); +} + +static int +armpmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct hw_perf_event *hwc = &event->hw; + int idx; + int err = 0; + + /* If we don't have a space for the counter then finish early. */ + idx = armpmu->get_event_idx(cpuc, hwc); + if (idx < 0) { + err = idx; + goto out; + } + + /* + * If there is an event in the counter we are going to use then make + * sure it is disabled. + */ + event->hw.idx = idx; + armpmu->disable(hwc, idx); + cpuc->events[idx] = event; + set_bit(idx, cpuc->active_mask); + + /* Set the period for the event. */ + armpmu_event_set_period(event, hwc, idx); + + /* Enable the event. */ + armpmu->enable(hwc, idx); + + /* Propagate our changes to the userspace mapping. */ + perf_event_update_userpage(event); + +out: + return err; +} + +static struct pmu pmu = { + .enable = armpmu_enable, + .disable = armpmu_disable, + .unthrottle = armpmu_unthrottle, + .read = armpmu_read, +}; + +static int +validate_event(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + struct hw_perf_event fake_event = event->hw; + + if (event->pmu && event->pmu != &pmu) + return 0; + + return armpmu->get_event_idx(cpuc, &fake_event) >= 0; +} + +static int +validate_group(struct perf_event *event) +{ + struct perf_event *sibling, *leader = event->group_leader; + struct cpu_hw_events fake_pmu; + + memset(&fake_pmu, 0, sizeof(fake_pmu)); + + if (!validate_event(&fake_pmu, leader)) + return -ENOSPC; + + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { + if (!validate_event(&fake_pmu, sibling)) + return -ENOSPC; + } + + if (!validate_event(&fake_pmu, event)) + return -ENOSPC; + + return 0; +} + +static int +armpmu_reserve_hardware(void) +{ + int i, err = -ENODEV, irq; + + pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU); + if (IS_ERR(pmu_device)) { + pr_warning("unable to reserve pmu\n"); + return PTR_ERR(pmu_device); + } + + init_pmu(ARM_PMU_DEVICE_CPU); + + if (pmu_device->num_resources < 1) { + pr_err("no irqs for PMUs defined\n"); + return -ENODEV; + } + + for (i = 0; i < pmu_device->num_resources; ++i) { + irq = platform_get_irq(pmu_device, i); + if (irq < 0) + continue; + + err = request_irq(irq, armpmu->handle_irq, + IRQF_DISABLED | IRQF_NOBALANCING, + "armpmu", NULL); + if (err) { + pr_warning("unable to request IRQ%d for ARM perf " + "counters\n", irq); + break; + } + } + + if (err) { + for (i = i - 1; i >= 0; --i) { + irq = platform_get_irq(pmu_device, i); + if (irq >= 0) + free_irq(irq, NULL); + } + release_pmu(pmu_device); + pmu_device = NULL; + } + + return err; +} + +static void +armpmu_release_hardware(void) +{ + int i, irq; + + for (i = pmu_device->num_resources - 1; i >= 0; --i) { + irq = platform_get_irq(pmu_device, i); + if (irq >= 0) + free_irq(irq, NULL); + } + armpmu->stop(); + + release_pmu(pmu_device); + pmu_device = NULL; +} + +static atomic_t active_events = ATOMIC_INIT(0); +static DEFINE_MUTEX(pmu_reserve_mutex); + +static void +hw_perf_event_destroy(struct perf_event *event) +{ + if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { + armpmu_release_hardware(); + mutex_unlock(&pmu_reserve_mutex); + } +} + +static int +__hw_perf_event_init(struct perf_event *event) +{ + struct hw_perf_event *hwc = &event->hw; + int mapping, err; + + /* Decode the 
generic type into an ARM event identifier. */ + if (PERF_TYPE_HARDWARE == event->attr.type) { + mapping = armpmu->event_map(event->attr.config); + } else if (PERF_TYPE_HW_CACHE == event->attr.type) { + mapping = armpmu_map_cache_event(event->attr.config); + } else if (PERF_TYPE_RAW == event->attr.type) { + mapping = armpmu->raw_event(event->attr.config); + } else { + pr_debug("event type %x not supported\n", event->attr.type); + return -EOPNOTSUPP; + } + + if (mapping < 0) { + pr_debug("event %x:%llx not supported\n", event->attr.type, + event->attr.config); + return mapping; + } + + /* + * Check whether we need to exclude the counter from certain modes. + * The ARM performance counters are on all of the time so if someone + * has asked us for some excludes then we have to fail. + */ + if (event->attr.exclude_kernel || event->attr.exclude_user || + event->attr.exclude_hv || event->attr.exclude_idle) { + pr_debug("ARM performance counters do not support " + "mode exclusion\n"); + return -EPERM; + } + + /* + * We don't assign an index until we actually place the event onto + * hardware. Use -1 to signify that we haven't decided where to put it + * yet. For SMP systems, each core has it's own PMU so we can't do any + * clever allocation or constraints checking at this point. + */ + hwc->idx = -1; + + /* + * Store the event encoding into the config_base field. config and + * event_base are unused as the only 2 things we need to know are + * the event mapping and the counter to use. The counter to use is + * also the indx and the config_base is the event type. + */ + hwc->config_base = (unsigned long)mapping; + hwc->config = 0; + hwc->event_base = 0; + + if (!hwc->sample_period) { + hwc->sample_period = armpmu->max_period; + hwc->last_period = hwc->sample_period; + atomic64_set(&hwc->period_left, hwc->sample_period); + } + + err = 0; + if (event->group_leader != event) { + err = validate_group(event); + if (err) + return -EINVAL; + } + + return err; +} + +const struct pmu * +hw_perf_event_init(struct perf_event *event) +{ + int err = 0; + + if (!armpmu) + return ERR_PTR(-ENODEV); + + event->destroy = hw_perf_event_destroy; + + if (!atomic_inc_not_zero(&active_events)) { + if (atomic_read(&active_events) > perf_max_events) { + atomic_dec(&active_events); + return ERR_PTR(-ENOSPC); + } + + mutex_lock(&pmu_reserve_mutex); + if (atomic_read(&active_events) == 0) { + err = armpmu_reserve_hardware(); + } + + if (!err) + atomic_inc(&active_events); + mutex_unlock(&pmu_reserve_mutex); + } + + if (err) + return ERR_PTR(err); + + err = __hw_perf_event_init(event); + if (err) + hw_perf_event_destroy(event); + + return err ? ERR_PTR(err) : &pmu; +} + +void +hw_perf_enable(void) +{ + /* Enable all of the perf events on hardware. */ + int idx; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + + if (!armpmu) + return; + + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + + if (!event) + continue; + + armpmu->enable(&event->hw, idx); + } + + armpmu->start(); +} + +void +hw_perf_disable(void) +{ + if (armpmu) + armpmu->stop(); +} + +/* + * ARMv6 Performance counter handling code. + * + * ARMv6 has 2 configurable performance counters and a single cycle counter. + * They all share a single reset bit but can be written to zero so we can use + * that for a reset. + * + * The counters can't be individually enabled or disabled so when we remove + * one event and replace it with another we could get spurious counts from the + * wrong event. 
However, we can take advantage of the fact that the + * performance counters can export events to the event bus, and the event bus + * itself can be monitored. This requires that we *don't* export the events to + * the event bus. The procedure for disabling a configurable counter is: + * - change the counter to count the ETMEXTOUT[0] signal (0x20). This + * effectively stops the counter from counting. + * - disable the counter's interrupt generation (each counter has it's + * own interrupt enable bit). + * Once stopped, the counter value can be written as 0 to reset. + * + * To enable a counter: + * - enable the counter's interrupt generation. + * - set the new event type. + * + * Note: the dedicated cycle counter only counts cycles and can't be + * enabled/disabled independently of the others. When we want to disable the + * cycle counter, we have to just disable the interrupt reporting and start + * ignoring that counter. When re-enabling, we have to reset the value and + * enable the interrupt. + */ + +enum armv6_perf_types { + ARMV6_PERFCTR_ICACHE_MISS = 0x0, + ARMV6_PERFCTR_IBUF_STALL = 0x1, + ARMV6_PERFCTR_DDEP_STALL = 0x2, + ARMV6_PERFCTR_ITLB_MISS = 0x3, + ARMV6_PERFCTR_DTLB_MISS = 0x4, + ARMV6_PERFCTR_BR_EXEC = 0x5, + ARMV6_PERFCTR_BR_MISPREDICT = 0x6, + ARMV6_PERFCTR_INSTR_EXEC = 0x7, + ARMV6_PERFCTR_DCACHE_HIT = 0x9, + ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, + ARMV6_PERFCTR_DCACHE_MISS = 0xB, + ARMV6_PERFCTR_DCACHE_WBACK = 0xC, + ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, + ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, + ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, + ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, + ARMV6_PERFCTR_WBUF_DRAINED = 0x12, + ARMV6_PERFCTR_CPU_CYCLES = 0xFF, + ARMV6_PERFCTR_NOP = 0x20, +}; + +enum armv6_counters { + ARMV6_CYCLE_COUNTER = 1, + ARMV6_COUNTER0, + ARMV6_COUNTER1, +}; + +/* + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. + */ +static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + /* + * The performance counters don't differentiate between read + * and write accesses/misses so this isn't strictly correct, + * but it's the best we can do. Writes and reads get + * combined. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * The ARM performance counters can count micro DTLB misses, + * micro ITLB misses and main TLB misses. There isn't an event + * for TLB misses, so use the micro misses here and if users + * want the main TLB misses they can use a raw counter. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +enum armv6mpcore_perf_types { + ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, + ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, + ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, + ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, + ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, + ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, + ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, + ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, + ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, + ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, + ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, + ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, + ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, + ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, + ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, + ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, + ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, + ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, + ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, + ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, +}; + +/* + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. 
+ */ +static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = + ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, + [C(RESULT_MISS)] = + ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = + ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, + [C(RESULT_MISS)] = + ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * The ARM performance counters can count micro DTLB misses, + * micro ITLB misses and main TLB misses. There isn't an event + * for TLB misses, so use the micro misses here and if users + * want the main TLB misses they can use a raw counter. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +static inline unsigned long +armv6_pmcr_read(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); + return val; +} + +static inline void +armv6_pmcr_write(unsigned long val) +{ + asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); +} + +#define ARMV6_PMCR_ENABLE (1 << 0) +#define ARMV6_PMCR_CTR01_RESET (1 << 1) +#define ARMV6_PMCR_CCOUNT_RESET (1 << 2) +#define ARMV6_PMCR_CCOUNT_DIV (1 << 3) +#define ARMV6_PMCR_COUNT0_IEN (1 << 4) +#define ARMV6_PMCR_COUNT1_IEN (1 << 5) +#define ARMV6_PMCR_CCOUNT_IEN (1 << 6) +#define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) +#define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) +#define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) +#define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 +#define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) +#define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 +#define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) + +#define ARMV6_PMCR_OVERFLOWED_MASK \ + (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \ + ARMV6_PMCR_CCOUNT_OVERFLOW) + +static inline int +armv6_pmcr_has_overflowed(unsigned long pmcr) +{ + return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK); +} + +static inline int +armv6_pmcr_counter_has_overflowed(unsigned long pmcr, + enum armv6_counters counter) +{ + int ret = 0; + + if (ARMV6_CYCLE_COUNTER == counter) + ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW; + else if (ARMV6_COUNTER0 == counter) + ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW; + else if (ARMV6_COUNTER1 == counter) + ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW; + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + + return ret; +} + +static inline u32 +armv6pmu_read_counter(int counter) +{ + unsigned long value = 0; + + if (ARMV6_CYCLE_COUNTER == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value)); + else if (ARMV6_COUNTER0 == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value)); + else if (ARMV6_COUNTER1 == counter) + asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value)); + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + + return value; +} + +static inline void +armv6pmu_write_counter(int counter, + u32 value) +{ + if (ARMV6_CYCLE_COUNTER == counter) + asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); + else if (ARMV6_COUNTER0 == counter) + asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value)); + else if (ARMV6_COUNTER1 == counter) + asm volatile("mcr p15, 0, %0, 
c15, c12, 3" : : "r"(value)); + else + WARN_ONCE(1, "invalid counter number (%d)\n", counter); +} + +void +armv6pmu_enable_event(struct hw_perf_event *hwc, + int idx) +{ + unsigned long val, mask, evt, flags; + + if (ARMV6_CYCLE_COUNTER == idx) { + mask = 0; + evt = ARMV6_PMCR_CCOUNT_IEN; + } else if (ARMV6_COUNTER0 == idx) { + mask = ARMV6_PMCR_EVT_COUNT0_MASK; + evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) | + ARMV6_PMCR_COUNT0_IEN; + } else if (ARMV6_COUNTER1 == idx) { + mask = ARMV6_PMCR_EVT_COUNT1_MASK; + evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) | + ARMV6_PMCR_COUNT1_IEN; + } else { + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + /* + * Mask out the current event and set the counter to count the event + * that we're interested in. + */ + spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val &= ~mask; + val |= evt; + armv6_pmcr_write(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static irqreturn_t +armv6pmu_handle_irq(int irq_num, + void *dev) +{ + unsigned long pmcr = armv6_pmcr_read(); + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + if (!armv6_pmcr_has_overflowed(pmcr)) + return IRQ_NONE; + + regs = get_irq_regs(); + + /* + * The interrupts are cleared by writing the overflow flags back to + * the control register. All of the other bits don't have any effect + * if they are rewritten, so write the whole value back. + */ + armv6_pmcr_write(pmcr); + + perf_sample_data_init(&data, 0); + + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event, hwc, idx); + data.period = event->hw.last_period; + if (!armpmu_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 0, &data, regs)) + armpmu->disable(hwc, idx); + } + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts enabled. For + * platforms that can have the PMU interrupts raised as a PMI, this + * will not work. + */ + perf_event_do_pending(); + + return IRQ_HANDLED; +} + +static void +armv6pmu_start(void) +{ + unsigned long flags, val; + + spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val |= ARMV6_PMCR_ENABLE; + armv6_pmcr_write(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +void +armv6pmu_stop(void) +{ + unsigned long flags, val; + + spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val &= ~ARMV6_PMCR_ENABLE; + armv6_pmcr_write(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static inline int +armv6pmu_event_map(int config) +{ + int mapping = armv6_perf_map[config]; + if (HW_OP_UNSUPPORTED == mapping) + mapping = -EOPNOTSUPP; + return mapping; +} + +static inline int +armv6mpcore_pmu_event_map(int config) +{ + int mapping = armv6mpcore_perf_map[config]; + if (HW_OP_UNSUPPORTED == mapping) + mapping = -EOPNOTSUPP; + return mapping; +} + +static u64 +armv6pmu_raw_event(u64 config) +{ + return config & 0xff; +} + +static int +armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, + struct hw_perf_event *event) +{ + /* Always place a cycle counter into the cycle counter. 
*/ + if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { + if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return ARMV6_CYCLE_COUNTER; + } else { + /* + * For anything other than a cycle counter, try and use + * counter0 and counter1. + */ + if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) { + return ARMV6_COUNTER1; + } + + if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) { + return ARMV6_COUNTER0; + } + + /* The counters are all in use. */ + return -EAGAIN; + } +} + +static void +armv6pmu_disable_event(struct hw_perf_event *hwc, + int idx) +{ + unsigned long val, mask, evt, flags; + + if (ARMV6_CYCLE_COUNTER == idx) { + mask = ARMV6_PMCR_CCOUNT_IEN; + evt = 0; + } else if (ARMV6_COUNTER0 == idx) { + mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK; + evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT; + } else if (ARMV6_COUNTER1 == idx) { + mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK; + evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT; + } else { + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + /* + * Mask out the current event and set the counter to count the number + * of ETM bus signal assertion cycles. The external reporting should + * be disabled and so this should never increment. + */ + spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val &= ~mask; + val |= evt; + armv6_pmcr_write(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, + int idx) +{ + unsigned long val, mask, flags, evt = 0; + + if (ARMV6_CYCLE_COUNTER == idx) { + mask = ARMV6_PMCR_CCOUNT_IEN; + } else if (ARMV6_COUNTER0 == idx) { + mask = ARMV6_PMCR_COUNT0_IEN; + } else if (ARMV6_COUNTER1 == idx) { + mask = ARMV6_PMCR_COUNT1_IEN; + } else { + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + /* + * Unlike UP ARMv6, we don't have a way of stopping the counters. We + * simply disable the interrupt reporting. + */ + spin_lock_irqsave(&pmu_lock, flags); + val = armv6_pmcr_read(); + val &= ~mask; + val |= evt; + armv6_pmcr_write(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static const struct arm_pmu armv6pmu = { + .id = ARM_PERF_PMU_ID_V6, + .handle_irq = armv6pmu_handle_irq, + .enable = armv6pmu_enable_event, + .disable = armv6pmu_disable_event, + .event_map = armv6pmu_event_map, + .raw_event = armv6pmu_raw_event, + .read_counter = armv6pmu_read_counter, + .write_counter = armv6pmu_write_counter, + .get_event_idx = armv6pmu_get_event_idx, + .start = armv6pmu_start, + .stop = armv6pmu_stop, + .num_events = 3, + .max_period = (1LLU << 32) - 1, +}; + +/* + * ARMv6mpcore is almost identical to single core ARMv6 with the exception + * that some of the events have different enumerations and that there is no + * *hack* to stop the programmable counters. To stop the counters we simply + * disable the interrupt reporting and update the event. When unthrottling we + * reset the period and enable the interrupt reporting. 
+ */ +static const struct arm_pmu armv6mpcore_pmu = { + .id = ARM_PERF_PMU_ID_V6MP, + .handle_irq = armv6pmu_handle_irq, + .enable = armv6pmu_enable_event, + .disable = armv6mpcore_pmu_disable_event, + .event_map = armv6mpcore_pmu_event_map, + .raw_event = armv6pmu_raw_event, + .read_counter = armv6pmu_read_counter, + .write_counter = armv6pmu_write_counter, + .get_event_idx = armv6pmu_get_event_idx, + .start = armv6pmu_start, + .stop = armv6pmu_stop, + .num_events = 3, + .max_period = (1LLU << 32) - 1, +}; + +/* + * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. + * + * Copied from ARMv6 code, with the low level code inspired + * by the ARMv7 Oprofile code. + * + * Cortex-A8 has up to 4 configurable performance counters and + * a single cycle counter. + * Cortex-A9 has up to 31 configurable performance counters and + * a single cycle counter. + * + * All counters can be enabled/disabled and IRQ masked separately. The cycle + * counter and all 4 performance counters together can be reset separately. + */ + +/* Common ARMv7 event types */ +enum armv7_perf_types { + ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, + ARMV7_PERFCTR_IFETCH_MISS = 0x01, + ARMV7_PERFCTR_ITLB_MISS = 0x02, + ARMV7_PERFCTR_DCACHE_REFILL = 0x03, + ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, + ARMV7_PERFCTR_DTLB_REFILL = 0x05, + ARMV7_PERFCTR_DREAD = 0x06, + ARMV7_PERFCTR_DWRITE = 0x07, + + ARMV7_PERFCTR_EXC_TAKEN = 0x09, + ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, + ARMV7_PERFCTR_CID_WRITE = 0x0B, + /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. + * It counts: + * - all branch instructions, + * - instructions that explicitly write the PC, + * - exception generating instructions. + */ + ARMV7_PERFCTR_PC_WRITE = 0x0C, + ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, + ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, + ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, + ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, + + ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, + + ARMV7_PERFCTR_CPU_CYCLES = 0xFF +}; + +/* ARMv7 Cortex-A8 specific event types */ +enum armv7_a8_perf_types { + ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, + + ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, + + ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, + ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, + ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, + ARMV7_PERFCTR_L2_ACCESS = 0x43, + ARMV7_PERFCTR_L2_CACH_MISS = 0x44, + ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45, + ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46, + ARMV7_PERFCTR_MEMORY_REPLAY = 0x47, + ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48, + ARMV7_PERFCTR_L1_DATA_MISS = 0x49, + ARMV7_PERFCTR_L1_INST_MISS = 0x4A, + ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B, + ARMV7_PERFCTR_L1_NEON_DATA = 0x4C, + ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D, + ARMV7_PERFCTR_L2_NEON = 0x4E, + ARMV7_PERFCTR_L2_NEON_HIT = 0x4F, + ARMV7_PERFCTR_L1_INST = 0x50, + ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51, + ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52, + ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53, + ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54, + ARMV7_PERFCTR_OP_EXECUTED = 0x55, + ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56, + ARMV7_PERFCTR_CYCLES_INST = 0x57, + ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58, + ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59, + ARMV7_PERFCTR_NEON_CYCLES = 0x5A, + + ARMV7_PERFCTR_PMU0_EVENTS = 0x70, + ARMV7_PERFCTR_PMU1_EVENTS = 0x71, + ARMV7_PERFCTR_PMU_EVENTS = 0x72, +}; + +/* ARMv7 Cortex-A9 specific event types */ +enum armv7_a9_perf_types { + ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, + ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, + ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, + + ARMV7_PERFCTR_COHERENT_LINE_MISS = 
0x50, + ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51, + + ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60, + ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61, + ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62, + ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63, + ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64, + ARMV7_PERFCTR_DATA_EVICTION = 0x65, + ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66, + ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67, + ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68, + + ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E, + + ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70, + ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71, + ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72, + ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73, + ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74, + + ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80, + ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81, + ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82, + ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83, + ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84, + ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85, + ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86, + + ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A, + ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B, + + ARMV7_PERFCTR_ISB_INST = 0x90, + ARMV7_PERFCTR_DSB_INST = 0x91, + ARMV7_PERFCTR_DMB_INST = 0x92, + ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93, + + ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0, + ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1, + ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2, + ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3, + ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4, + ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 +}; + +/* + * Cortex-A8 HW events mapping + * + * The hardware events that we support. We do support cache operations but + * we have harvard caches and no way to combine instruction and data + * accesses/misses in hardware. + */ +static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, +}; + +static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + /* + * The performance counters don't differentiate between read + * and write accesses/misses so this isn't strictly correct, + * but it's the best we can do. Writes and reads get + * combined. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * Only ITLB misses and DTLB refills are supported. + * If users want the DTLB refills misses a raw counter + * must be used. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +/* + * Cortex-A9 HW events mapping + */ +static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = + ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, + [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT, + [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, +}; + +static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + /* + * The performance counters don't differentiate between read + * and write accesses/misses so this isn't strictly correct, + * but it's the best we can do. Writes and reads get + * combined. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + /* + * Only ITLB misses and DTLB refills are supported. + * If users want the DTLB refills misses a raw counter + * must be used. + */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +/* + * Perf Events counters + */ +enum armv7_counters { + ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ + ARMV7_COUNTER0 = 2, /* First event counter */ +}; + +/* + * The cycle counter is ARMV7_CYCLE_COUNTER. + * The first event counter is ARMV7_COUNTER0. + * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). 
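Aside (illustrative, not part of the patch): the perf event index space described above is deliberately offset from the hardware counter numbering. Index 1 is the cycle counter (hardware counter 31, CCNT) and index N+2 is event counter N, and the ARMV7_CNTENS_P()/ARMV7_INTENS_P()/ARMV7_FLAG_P() macros defined next all rely on the same subtraction. A standalone sketch of that arithmetic; the constants are copied from the patch, while counter_bit() and the main() harness are only for demonstration.

        #include <stdio.h>

        #define ARMV7_CYCLE_COUNTER     1   /* perf index of the cycle counter */
        #define ARMV7_COUNTER0          2   /* perf index of the first event counter */
        #define ARMV7_CNT0              0   /* hardware number of the first event counter */
        #define ARMV7_CCNT              31  /* hardware number of the cycle counter */
        #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0)

        /* Bit that CNTENS/CNTENC/INTENS/INTENC/FLAG use for a perf index. */
        static unsigned int counter_bit(int idx)
        {
                if (idx == ARMV7_CYCLE_COUNTER)
                        return 1u << ARMV7_CCNT;
                return 1u << (idx - ARMV7_EVENT_CNT_TO_CNTx);
        }

        int main(void)
        {
                int idx;

                for (idx = ARMV7_CYCLE_COUNTER; idx <= ARMV7_COUNTER0 + 3; idx++)
                        printf("perf index %d -> register bit 0x%08x\n",
                               idx, counter_bit(idx));
                return 0;
        }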
+ */ +#define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) + +/* + * ARMv7 low level PMNC access + */ + +/* + * Per-CPU PMNC: config reg + */ +#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ +#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ +#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ +#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */ +#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ +#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug*/ +#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ +#define ARMV7_PMNC_N_MASK 0x1f +#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ + +/* + * Available counters + */ +#define ARMV7_CNT0 0 /* First event counter */ +#define ARMV7_CCNT 31 /* Cycle counter */ + +/* Perf Event to low level counters mapping */ +#define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) + +/* + * CNTENS: counters enable reg + */ +#define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_CNTENS_C (1 << ARMV7_CCNT) + +/* + * CNTENC: counters disable reg + */ +#define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_CNTENC_C (1 << ARMV7_CCNT) + +/* + * INTENS: counters overflow interrupt enable reg + */ +#define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_INTENS_C (1 << ARMV7_CCNT) + +/* + * INTENC: counters overflow interrupt disable reg + */ +#define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_INTENC_C (1 << ARMV7_CCNT) + +/* + * EVTSEL: Event selection reg + */ +#define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ + +/* + * SELECT: Counter selection reg + */ +#define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ + +/* + * FLAG: counters overflow flag status reg + */ +#define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) +#define ARMV7_FLAG_C (1 << ARMV7_CCNT) +#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ +#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK + +static inline unsigned long armv7_pmnc_read(void) +{ + u32 val; + asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); + return val; +} + +static inline void armv7_pmnc_write(unsigned long val) +{ + val &= ARMV7_PMNC_MASK; + asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); +} + +static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) +{ + return pmnc & ARMV7_OVERFLOWED_MASK; +} + +static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, + enum armv7_counters counter) +{ + int ret; + + if (counter == ARMV7_CYCLE_COUNTER) + ret = pmnc & ARMV7_FLAG_C; + else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) + ret = pmnc & ARMV7_FLAG_P(counter); + else + pr_err("CPU%u checking wrong counter %d overflow status\n", + smp_processor_id(), counter); + + return ret; +} + +static inline int armv7_pmnc_select_counter(unsigned int idx) +{ + u32 val; + + if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { + pr_err("CPU%u selecting wrong PMNC counter" + " %d\n", smp_processor_id(), idx); + return -1; + } + + val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; + asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); + + return idx; +} + +static inline u32 armv7pmu_read_counter(int idx) +{ + unsigned long value = 0; + + if (idx == ARMV7_CYCLE_COUNTER) + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); + else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { + if (armv7_pmnc_select_counter(idx) == idx) 
+ asm volatile("mrc p15, 0, %0, c9, c13, 2" + : "=r" (value)); + } else + pr_err("CPU%u reading wrong counter %d\n", + smp_processor_id(), idx); + + return value; +} + +static inline void armv7pmu_write_counter(int idx, u32 value) +{ + if (idx == ARMV7_CYCLE_COUNTER) + asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); + else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { + if (armv7_pmnc_select_counter(idx) == idx) + asm volatile("mcr p15, 0, %0, c9, c13, 2" + : : "r" (value)); + } else + pr_err("CPU%u writing wrong counter %d\n", + smp_processor_id(), idx); +} + +static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) +{ + if (armv7_pmnc_select_counter(idx) == idx) { + val &= ARMV7_EVTSEL_MASK; + asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); + } +} + +static inline u32 armv7_pmnc_enable_counter(unsigned int idx) +{ + u32 val; + + if ((idx != ARMV7_CYCLE_COUNTER) && + ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { + pr_err("CPU%u enabling wrong PMNC counter" + " %d\n", smp_processor_id(), idx); + return -1; + } + + if (idx == ARMV7_CYCLE_COUNTER) + val = ARMV7_CNTENS_C; + else + val = ARMV7_CNTENS_P(idx); + + asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); + + return idx; +} + +static inline u32 armv7_pmnc_disable_counter(unsigned int idx) +{ + u32 val; + + + if ((idx != ARMV7_CYCLE_COUNTER) && + ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { + pr_err("CPU%u disabling wrong PMNC counter" + " %d\n", smp_processor_id(), idx); + return -1; + } + + if (idx == ARMV7_CYCLE_COUNTER) + val = ARMV7_CNTENC_C; + else + val = ARMV7_CNTENC_P(idx); + + asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); + + return idx; +} + +static inline u32 armv7_pmnc_enable_intens(unsigned int idx) +{ + u32 val; + + if ((idx != ARMV7_CYCLE_COUNTER) && + ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { + pr_err("CPU%u enabling wrong PMNC counter" + " interrupt enable %d\n", smp_processor_id(), idx); + return -1; + } + + if (idx == ARMV7_CYCLE_COUNTER) + val = ARMV7_INTENS_C; + else + val = ARMV7_INTENS_P(idx); + + asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); + + return idx; +} + +static inline u32 armv7_pmnc_disable_intens(unsigned int idx) +{ + u32 val; + + if ((idx != ARMV7_CYCLE_COUNTER) && + ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { + pr_err("CPU%u disabling wrong PMNC counter" + " interrupt enable %d\n", smp_processor_id(), idx); + return -1; + } + + if (idx == ARMV7_CYCLE_COUNTER) + val = ARMV7_INTENC_C; + else + val = ARMV7_INTENC_P(idx); + + asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); + + return idx; +} + +static inline u32 armv7_pmnc_getreset_flags(void) +{ + u32 val; + + /* Read */ + asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); + + /* Write to clear flags */ + val &= ARMV7_FLAG_MASK; + asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); + + return val; +} + +#ifdef DEBUG +static void armv7_pmnc_dump_regs(void) +{ + u32 val; + unsigned int cnt; + + printk(KERN_INFO "PMNC registers dump:\n"); + + asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); + printk(KERN_INFO "PMNC =0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); + printk(KERN_INFO "CNTENS=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); + printk(KERN_INFO "INTENS=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); + printk(KERN_INFO "FLAGS =0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c12, 5" : 
"=r" (val)); + printk(KERN_INFO "SELECT=0x%08x\n", val); + + asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); + printk(KERN_INFO "CCNT =0x%08x\n", val); + + for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { + armv7_pmnc_select_counter(cnt); + asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); + printk(KERN_INFO "CNT[%d] count =0x%08x\n", + cnt-ARMV7_EVENT_CNT_TO_CNTx, val); + asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); + printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", + cnt-ARMV7_EVENT_CNT_TO_CNTx, val); + } +} +#endif + +void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + + /* + * Enable counter and interrupt, and set the counter to count + * the event that we're interested in. + */ + spin_lock_irqsave(&pmu_lock, flags); + + /* + * Disable counter + */ + armv7_pmnc_disable_counter(idx); + + /* + * Set event (if destined for PMNx counters) + * We don't need to set the event if it's a cycle count + */ + if (idx != ARMV7_CYCLE_COUNTER) + armv7_pmnc_write_evtsel(idx, hwc->config_base); + + /* + * Enable interrupt for this counter + */ + armv7_pmnc_enable_intens(idx); + + /* + * Enable counter + */ + armv7_pmnc_enable_counter(idx); + + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags; + + /* + * Disable counter and interrupt + */ + spin_lock_irqsave(&pmu_lock, flags); + + /* + * Disable counter + */ + armv7_pmnc_disable_counter(idx); + + /* + * Disable interrupt for this counter + */ + armv7_pmnc_disable_intens(idx); + + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) +{ + unsigned long pmnc; + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + /* + * Get and reset the IRQ flags + */ + pmnc = armv7_pmnc_getreset_flags(); + + /* + * Did an overflow occur? + */ + if (!armv7_pmnc_has_overflowed(pmnc)) + return IRQ_NONE; + + /* + * Handle the counter(s) overflow(s) + */ + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0); + + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + + /* + * We have a single interrupt for all counters. Check that + * each counter has overflowed before we process it. + */ + if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event, hwc, idx); + data.period = event->hw.last_period; + if (!armpmu_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 0, &data, regs)) + armpmu->disable(hwc, idx); + } + + /* + * Handle the pending perf events. + * + * Note: this call *must* be run with interrupts enabled. For + * platforms that can have the PMU interrupts raised as a PMI, this + * will not work. 
+ */ + perf_event_do_pending(); + + return IRQ_HANDLED; +} + +static void armv7pmu_start(void) +{ + unsigned long flags; + + spin_lock_irqsave(&pmu_lock, flags); + /* Enable all counters */ + armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void armv7pmu_stop(void) +{ + unsigned long flags; + + spin_lock_irqsave(&pmu_lock, flags); + /* Disable all counters */ + armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static inline int armv7_a8_pmu_event_map(int config) +{ + int mapping = armv7_a8_perf_map[config]; + if (HW_OP_UNSUPPORTED == mapping) + mapping = -EOPNOTSUPP; + return mapping; +} + +static inline int armv7_a9_pmu_event_map(int config) +{ + int mapping = armv7_a9_perf_map[config]; + if (HW_OP_UNSUPPORTED == mapping) + mapping = -EOPNOTSUPP; + return mapping; +} + +static u64 armv7pmu_raw_event(u64 config) +{ + return config & 0xff; +} + +static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, + struct hw_perf_event *event) +{ + int idx; + + /* Always place a cycle counter into the cycle counter. */ + if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { + if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return ARMV7_CYCLE_COUNTER; + } else { + /* + * For anything other than a cycle counter, try and use + * the events counters + */ + for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { + if (!test_and_set_bit(idx, cpuc->used_mask)) + return idx; + } + + /* The counters are all in use. */ + return -EAGAIN; + } +} + +static struct arm_pmu armv7pmu = { + .handle_irq = armv7pmu_handle_irq, + .enable = armv7pmu_enable_event, + .disable = armv7pmu_disable_event, + .raw_event = armv7pmu_raw_event, + .read_counter = armv7pmu_read_counter, + .write_counter = armv7pmu_write_counter, + .get_event_idx = armv7pmu_get_event_idx, + .start = armv7pmu_start, + .stop = armv7pmu_stop, + .max_period = (1LLU << 32) - 1, +}; + +static u32 __init armv7_reset_read_pmnc(void) +{ + u32 nb_cnt; + + /* Initialize & Reset PMNC: C and P bits */ + armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); + + /* Read the nb of CNTx counters supported from PMNC */ + nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; + + /* Add the CPU cycles counter and return */ + return nb_cnt + 1; +} + +/* + * ARMv5 [xscale] Performance counter handling code. + * + * Based on xscale OProfile code. + * + * There are two variants of the xscale PMU that we support: + * - xscale1pmu: 2 event counters and a cycle counter + * - xscale2pmu: 4 event counters and a cycle counter + * The two variants share event definitions, but have different + * PMU structures. 
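Aside (illustrative, not part of the patch): both XScale variants, like the ARMv7 PMUs above, also accept raw event numbers via the ->raw_event() callback, which simply masks perf_event_attr.config down to 8 bits. A hedged user-space sketch of driving such a counter through perf_event_open(2); the event number 0x0B (XSCALE_PERFCTR_DCACHE_MISS) is taken from the table below, the rest is a generic harness and assumes a kernel built with this patch and CONFIG_HW_PERF_EVENTS.

        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/ioctl.h>
        #include <sys/syscall.h>
        #include <linux/perf_event.h>

        int main(void)
        {
                struct perf_event_attr attr;
                long long count;
                int fd;

                memset(&attr, 0, sizeof(attr));
                attr.type = PERF_TYPE_RAW;      /* routed through armpmu->raw_event() */
                attr.size = sizeof(attr);
                attr.config = 0x0B;             /* XSCALE_PERFCTR_DCACHE_MISS, from the table below */
                attr.disabled = 1;

                /* Count for the calling process, on any CPU. */
                fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
                if (fd < 0) {
                        perror("perf_event_open");
                        return 1;
                }

                ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
                /* ... run the workload to be measured ... */
                ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

                if (read(fd, &count, sizeof(count)) == sizeof(count))
                        printf("D-cache misses: %lld\n", count);
                close(fd);
                return 0;
        }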
+ */ + +enum xscale_perf_types { + XSCALE_PERFCTR_ICACHE_MISS = 0x00, + XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01, + XSCALE_PERFCTR_DATA_STALL = 0x02, + XSCALE_PERFCTR_ITLB_MISS = 0x03, + XSCALE_PERFCTR_DTLB_MISS = 0x04, + XSCALE_PERFCTR_BRANCH = 0x05, + XSCALE_PERFCTR_BRANCH_MISS = 0x06, + XSCALE_PERFCTR_INSTRUCTION = 0x07, + XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08, + XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09, + XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A, + XSCALE_PERFCTR_DCACHE_MISS = 0x0B, + XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C, + XSCALE_PERFCTR_PC_CHANGED = 0x0D, + XSCALE_PERFCTR_BCU_REQUEST = 0x10, + XSCALE_PERFCTR_BCU_FULL = 0x11, + XSCALE_PERFCTR_BCU_DRAIN = 0x12, + XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14, + XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15, + XSCALE_PERFCTR_RMW = 0x16, + /* XSCALE_PERFCTR_CCNT is not hardware defined */ + XSCALE_PERFCTR_CCNT = 0xFE, + XSCALE_PERFCTR_UNUSED = 0xFF, +}; + +enum xscale_counters { + XSCALE_CYCLE_COUNTER = 1, + XSCALE_COUNTER0, + XSCALE_COUNTER1, + XSCALE_COUNTER2, + XSCALE_COUNTER3, +}; + +static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT, + [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH, + [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + 
[C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + +#define XSCALE_PMU_ENABLE 0x001 +#define XSCALE_PMN_RESET 0x002 +#define XSCALE_CCNT_RESET 0x004 +#define XSCALE_PMU_RESET (CCNT_RESET | PMN_RESET) +#define XSCALE_PMU_CNT64 0x008 + +static inline int +xscalepmu_event_map(int config) +{ + int mapping = xscale_perf_map[config]; + if (HW_OP_UNSUPPORTED == mapping) + mapping = -EOPNOTSUPP; + return mapping; +} + +static u64 +xscalepmu_raw_event(u64 config) +{ + return config & 0xff; +} + +#define XSCALE1_OVERFLOWED_MASK 0x700 +#define XSCALE1_CCOUNT_OVERFLOW 0x400 +#define XSCALE1_COUNT0_OVERFLOW 0x100 +#define XSCALE1_COUNT1_OVERFLOW 0x200 +#define XSCALE1_CCOUNT_INT_EN 0x040 +#define XSCALE1_COUNT0_INT_EN 0x010 +#define XSCALE1_COUNT1_INT_EN 0x020 +#define XSCALE1_COUNT0_EVT_SHFT 12 +#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT) +#define XSCALE1_COUNT1_EVT_SHFT 20 +#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT) + +static inline u32 +xscale1pmu_read_pmnc(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val)); + return val; +} + +static inline void +xscale1pmu_write_pmnc(u32 val) +{ + /* upper 4bits and 7, 11 are write-as-0 */ + val &= 0xffff77f; + asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val)); +} + +static inline int +xscale1_pmnc_counter_has_overflowed(unsigned long pmnc, + enum xscale_counters counter) +{ + int ret = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + ret = pmnc & XSCALE1_CCOUNT_OVERFLOW; + break; + case XSCALE_COUNTER0: + ret = pmnc & XSCALE1_COUNT0_OVERFLOW; + break; + case XSCALE_COUNTER1: + ret = pmnc & XSCALE1_COUNT1_OVERFLOW; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + } + + return ret; +} + +static irqreturn_t +xscale1pmu_handle_irq(int irq_num, void *dev) +{ + unsigned long pmnc; + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + /* + * NOTE: there's an A stepping erratum that states if an overflow + * bit already exists and another occurs, the previous + * Overflow bit gets cleared. There's no workaround. + * Fixed in B stepping or later. + */ + pmnc = xscale1pmu_read_pmnc(); + + /* + * Write the value back to clear the overflow flags. Overflow + * flags remain in pmnc for use below. We also disable the PMU + * while we process the interrupt. + */ + xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); + + if (!(pmnc & XSCALE1_OVERFLOWED_MASK)) + return IRQ_NONE; + + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0); + + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + + if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event, hwc, idx); + data.period = event->hw.last_period; + if (!armpmu_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 0, &data, regs)) + armpmu->disable(hwc, idx); + } + + perf_event_do_pending(); + + /* + * Re-enable the PMU. 
+ */ + pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(pmnc); + + return IRQ_HANDLED; +} + +static void +xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long val, mask, evt, flags; + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + mask = 0; + evt = XSCALE1_CCOUNT_INT_EN; + break; + case XSCALE_COUNTER0: + mask = XSCALE1_COUNT0_EVT_MASK; + evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) | + XSCALE1_COUNT0_INT_EN; + break; + case XSCALE_COUNTER1: + mask = XSCALE1_COUNT1_EVT_MASK; + evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) | + XSCALE1_COUNT1_INT_EN; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + spin_lock_irqsave(&pmu_lock, flags); + val = xscale1pmu_read_pmnc(); + val &= ~mask; + val |= evt; + xscale1pmu_write_pmnc(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long val, mask, evt, flags; + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + mask = XSCALE1_CCOUNT_INT_EN; + evt = 0; + break; + case XSCALE_COUNTER0: + mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK; + evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT; + break; + case XSCALE_COUNTER1: + mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK; + evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + spin_lock_irqsave(&pmu_lock, flags); + val = xscale1pmu_read_pmnc(); + val &= ~mask; + val |= evt; + xscale1pmu_write_pmnc(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static int +xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc, + struct hw_perf_event *event) +{ + if (XSCALE_PERFCTR_CCNT == event->config_base) { + if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask)) + return -EAGAIN; + + return XSCALE_CYCLE_COUNTER; + } else { + if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) { + return XSCALE_COUNTER1; + } + + if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) { + return XSCALE_COUNTER0; + } + + return -EAGAIN; + } +} + +static void +xscale1pmu_start(void) +{ + unsigned long flags, val; + + spin_lock_irqsave(&pmu_lock, flags); + val = xscale1pmu_read_pmnc(); + val |= XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +xscale1pmu_stop(void) +{ + unsigned long flags, val; + + spin_lock_irqsave(&pmu_lock, flags); + val = xscale1pmu_read_pmnc(); + val &= ~XSCALE_PMU_ENABLE; + xscale1pmu_write_pmnc(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static inline u32 +xscale1pmu_read_counter(int counter) +{ + u32 val = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val)); + break; + } + + return val; +} + +static inline void +xscale1pmu_write_counter(int counter, u32 val) +{ + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val)); + break; + } +} + +static const struct arm_pmu xscale1pmu = { + .id = ARM_PERF_PMU_ID_XSCALE1, + .handle_irq = xscale1pmu_handle_irq, + 
.enable = xscale1pmu_enable_event, + .disable = xscale1pmu_disable_event, + .event_map = xscalepmu_event_map, + .raw_event = xscalepmu_raw_event, + .read_counter = xscale1pmu_read_counter, + .write_counter = xscale1pmu_write_counter, + .get_event_idx = xscale1pmu_get_event_idx, + .start = xscale1pmu_start, + .stop = xscale1pmu_stop, + .num_events = 3, + .max_period = (1LLU << 32) - 1, +}; + +#define XSCALE2_OVERFLOWED_MASK 0x01f +#define XSCALE2_CCOUNT_OVERFLOW 0x001 +#define XSCALE2_COUNT0_OVERFLOW 0x002 +#define XSCALE2_COUNT1_OVERFLOW 0x004 +#define XSCALE2_COUNT2_OVERFLOW 0x008 +#define XSCALE2_COUNT3_OVERFLOW 0x010 +#define XSCALE2_CCOUNT_INT_EN 0x001 +#define XSCALE2_COUNT0_INT_EN 0x002 +#define XSCALE2_COUNT1_INT_EN 0x004 +#define XSCALE2_COUNT2_INT_EN 0x008 +#define XSCALE2_COUNT3_INT_EN 0x010 +#define XSCALE2_COUNT0_EVT_SHFT 0 +#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT) +#define XSCALE2_COUNT1_EVT_SHFT 8 +#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT) +#define XSCALE2_COUNT2_EVT_SHFT 16 +#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT) +#define XSCALE2_COUNT3_EVT_SHFT 24 +#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT) + +static inline u32 +xscale2pmu_read_pmnc(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val)); + /* bits 1-2 and 4-23 are read-unpredictable */ + return val & 0xff000009; +} + +static inline void +xscale2pmu_write_pmnc(u32 val) +{ + /* bits 4-23 are write-as-0, 24-31 are write ignored */ + val &= 0xf; + asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val)); +} + +static inline u32 +xscale2pmu_read_overflow_flags(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val)); + return val; +} + +static inline void +xscale2pmu_write_overflow_flags(u32 val) +{ + asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val)); +} + +static inline u32 +xscale2pmu_read_event_select(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val)); + return val; +} + +static inline void +xscale2pmu_write_event_select(u32 val) +{ + asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val)); +} + +static inline u32 +xscale2pmu_read_int_enable(void) +{ + u32 val; + asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val)); + return val; +} + +static void +xscale2pmu_write_int_enable(u32 val) +{ + asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val)); +} + +static inline int +xscale2_pmnc_counter_has_overflowed(unsigned long of_flags, + enum xscale_counters counter) +{ + int ret = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + ret = of_flags & XSCALE2_CCOUNT_OVERFLOW; + break; + case XSCALE_COUNTER0: + ret = of_flags & XSCALE2_COUNT0_OVERFLOW; + break; + case XSCALE_COUNTER1: + ret = of_flags & XSCALE2_COUNT1_OVERFLOW; + break; + case XSCALE_COUNTER2: + ret = of_flags & XSCALE2_COUNT2_OVERFLOW; + break; + case XSCALE_COUNTER3: + ret = of_flags & XSCALE2_COUNT3_OVERFLOW; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", counter); + } + + return ret; +} + +static irqreturn_t +xscale2pmu_handle_irq(int irq_num, void *dev) +{ + unsigned long pmnc, of_flags; + struct perf_sample_data data; + struct cpu_hw_events *cpuc; + struct pt_regs *regs; + int idx; + + /* Disable the PMU. */ + pmnc = xscale2pmu_read_pmnc(); + xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE); + + /* Check the overflow flag register. 
*/ + of_flags = xscale2pmu_read_overflow_flags(); + if (!(of_flags & XSCALE2_OVERFLOWED_MASK)) + return IRQ_NONE; + + /* Clear the overflow bits. */ + xscale2pmu_write_overflow_flags(of_flags); + + regs = get_irq_regs(); + + perf_sample_data_init(&data, 0); + + cpuc = &__get_cpu_var(cpu_hw_events); + for (idx = 0; idx <= armpmu->num_events; ++idx) { + struct perf_event *event = cpuc->events[idx]; + struct hw_perf_event *hwc; + + if (!test_bit(idx, cpuc->active_mask)) + continue; + + if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx)) + continue; + + hwc = &event->hw; + armpmu_event_update(event, hwc, idx); + data.period = event->hw.last_period; + if (!armpmu_event_set_period(event, hwc, idx)) + continue; + + if (perf_event_overflow(event, 0, &data, regs)) + armpmu->disable(hwc, idx); + } + + perf_event_do_pending(); + + /* + * Re-enable the PMU. + */ + pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE; + xscale2pmu_write_pmnc(pmnc); + + return IRQ_HANDLED; +} + +static void +xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags, ien, evtsel; + + ien = xscale2pmu_read_int_enable(); + evtsel = xscale2pmu_read_event_select(); + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + ien |= XSCALE2_CCOUNT_INT_EN; + break; + case XSCALE_COUNTER0: + ien |= XSCALE2_COUNT0_INT_EN; + evtsel &= ~XSCALE2_COUNT0_EVT_MASK; + evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT; + break; + case XSCALE_COUNTER1: + ien |= XSCALE2_COUNT1_INT_EN; + evtsel &= ~XSCALE2_COUNT1_EVT_MASK; + evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT; + break; + case XSCALE_COUNTER2: + ien |= XSCALE2_COUNT2_INT_EN; + evtsel &= ~XSCALE2_COUNT2_EVT_MASK; + evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT; + break; + case XSCALE_COUNTER3: + ien |= XSCALE2_COUNT3_INT_EN; + evtsel &= ~XSCALE2_COUNT3_EVT_MASK; + evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + spin_lock_irqsave(&pmu_lock, flags); + xscale2pmu_write_event_select(evtsel); + xscale2pmu_write_int_enable(ien); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) +{ + unsigned long flags, ien, evtsel; + + ien = xscale2pmu_read_int_enable(); + evtsel = xscale2pmu_read_event_select(); + + switch (idx) { + case XSCALE_CYCLE_COUNTER: + ien &= ~XSCALE2_CCOUNT_INT_EN; + break; + case XSCALE_COUNTER0: + ien &= ~XSCALE2_COUNT0_INT_EN; + evtsel &= ~XSCALE2_COUNT0_EVT_MASK; + evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; + break; + case XSCALE_COUNTER1: + ien &= ~XSCALE2_COUNT1_INT_EN; + evtsel &= ~XSCALE2_COUNT1_EVT_MASK; + evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; + break; + case XSCALE_COUNTER2: + ien &= ~XSCALE2_COUNT2_INT_EN; + evtsel &= ~XSCALE2_COUNT2_EVT_MASK; + evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; + break; + case XSCALE_COUNTER3: + ien &= ~XSCALE2_COUNT3_INT_EN; + evtsel &= ~XSCALE2_COUNT3_EVT_MASK; + evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; + break; + default: + WARN_ONCE(1, "invalid counter number (%d)\n", idx); + return; + } + + spin_lock_irqsave(&pmu_lock, flags); + xscale2pmu_write_event_select(evtsel); + xscale2pmu_write_int_enable(ien); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static int +xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc, + struct hw_perf_event *event) +{ + int idx = xscale1pmu_get_event_idx(cpuc, event); + if (idx >= 0) + goto out; + + if 
(!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask)) + idx = XSCALE_COUNTER3; + else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask)) + idx = XSCALE_COUNTER2; +out: + return idx; +} + +static void +xscale2pmu_start(void) +{ + unsigned long flags, val; + + spin_lock_irqsave(&pmu_lock, flags); + val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; + val |= XSCALE_PMU_ENABLE; + xscale2pmu_write_pmnc(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static void +xscale2pmu_stop(void) +{ + unsigned long flags, val; + + spin_lock_irqsave(&pmu_lock, flags); + val = xscale2pmu_read_pmnc(); + val &= ~XSCALE_PMU_ENABLE; + xscale2pmu_write_pmnc(val); + spin_unlock_irqrestore(&pmu_lock, flags); +} + +static inline u32 +xscale2pmu_read_counter(int counter) +{ + u32 val = 0; + + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER2: + asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val)); + break; + case XSCALE_COUNTER3: + asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val)); + break; + } + + return val; +} + +static inline void +xscale2pmu_write_counter(int counter, u32 val) +{ + switch (counter) { + case XSCALE_CYCLE_COUNTER: + asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val)); + break; + case XSCALE_COUNTER0: + asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER1: + asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER2: + asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val)); + break; + case XSCALE_COUNTER3: + asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val)); + break; + } +} + +static const struct arm_pmu xscale2pmu = { + .id = ARM_PERF_PMU_ID_XSCALE2, + .handle_irq = xscale2pmu_handle_irq, + .enable = xscale2pmu_enable_event, + .disable = xscale2pmu_disable_event, + .event_map = xscalepmu_event_map, + .raw_event = xscalepmu_raw_event, + .read_counter = xscale2pmu_read_counter, + .write_counter = xscale2pmu_write_counter, + .get_event_idx = xscale2pmu_get_event_idx, + .start = xscale2pmu_start, + .stop = xscale2pmu_stop, + .num_events = 5, + .max_period = (1LLU << 32) - 1, +}; + +static int __init +init_hw_perf_events(void) +{ + unsigned long cpuid = read_cpuid_id(); + unsigned long implementor = (cpuid & 0xFF000000) >> 24; + unsigned long part_number = (cpuid & 0xFFF0); + + /* ARM Ltd CPUs. 
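Aside (illustrative, not part of the patch): the probe logic that follows keys off two fields of the CPUID/MIDR value read above, the implementer byte in bits 24-31 and the part number in bits 4-15, with XScale parts additionally decoded through a 3-bit field at bit 13. A standalone sketch of the same decoding; pmu_for_midr() is a made-up helper and the example MIDR value is an assumption chosen to look like a Cortex-A8, not something taken from the patch.

        #include <stdio.h>

        static const char *pmu_for_midr(unsigned long cpuid)
        {
                unsigned long implementor = (cpuid & 0xFF000000) >> 24;
                unsigned long part_number = cpuid & 0xFFF0;

                if (implementor == 0x41) {              /* 'A': ARM Ltd */
                        switch (part_number) {
                        case 0xB360: case 0xB560: case 0xB760:
                                return "armv6pmu";
                        case 0xB020:
                                return "armv6mpcore_pmu";
                        case 0xC080:
                                return "armv7pmu (Cortex-A8)";
                        case 0xC090:
                                return "armv7pmu (Cortex-A9)";
                        }
                } else if (implementor == 0x69) {       /* 'i': Intel XScale */
                        switch ((cpuid >> 13) & 0x7) {
                        case 1: return "xscale1pmu";
                        case 2: return "xscale2pmu";
                        }
                }
                return "no hardware PMU support";
        }

        int main(void)
        {
                /* e.g. a Cortex-A8 might report a MIDR of roughly 0x410FC083 */
                printf("%s\n", pmu_for_midr(0x410FC083));
                return 0;
        }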
*/ + if (0x41 == implementor) { + switch (part_number) { + case 0xB360: /* ARM1136 */ + case 0xB560: /* ARM1156 */ + case 0xB760: /* ARM1176 */ + armpmu = &armv6pmu; + memcpy(armpmu_perf_cache_map, armv6_perf_cache_map, + sizeof(armv6_perf_cache_map)); + perf_max_events = armv6pmu.num_events; + break; + case 0xB020: /* ARM11mpcore */ + armpmu = &armv6mpcore_pmu; + memcpy(armpmu_perf_cache_map, + armv6mpcore_perf_cache_map, + sizeof(armv6mpcore_perf_cache_map)); + perf_max_events = armv6mpcore_pmu.num_events; + break; + case 0xC080: /* Cortex-A8 */ + armv7pmu.id = ARM_PERF_PMU_ID_CA8; + memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map, + sizeof(armv7_a8_perf_cache_map)); + armv7pmu.event_map = armv7_a8_pmu_event_map; + armpmu = &armv7pmu; + + /* Reset PMNC and read the nb of CNTx counters + supported */ + armv7pmu.num_events = armv7_reset_read_pmnc(); + perf_max_events = armv7pmu.num_events; + break; + case 0xC090: /* Cortex-A9 */ + armv7pmu.id = ARM_PERF_PMU_ID_CA9; + memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map, + sizeof(armv7_a9_perf_cache_map)); + armv7pmu.event_map = armv7_a9_pmu_event_map; + armpmu = &armv7pmu; + + /* Reset PMNC and read the nb of CNTx counters + supported */ + armv7pmu.num_events = armv7_reset_read_pmnc(); + perf_max_events = armv7pmu.num_events; + break; + } + /* Intel CPUs [xscale]. */ + } else if (0x69 == implementor) { + part_number = (cpuid >> 13) & 0x7; + switch (part_number) { + case 1: + armpmu = &xscale1pmu; + memcpy(armpmu_perf_cache_map, xscale_perf_cache_map, + sizeof(xscale_perf_cache_map)); + perf_max_events = xscale1pmu.num_events; + break; + case 2: + armpmu = &xscale2pmu; + memcpy(armpmu_perf_cache_map, xscale_perf_cache_map, + sizeof(xscale_perf_cache_map)); + perf_max_events = xscale2pmu.num_events; + break; + } + } + + if (armpmu) { + pr_info("enabled with %s PMU driver, %d counters available\n", + arm_pmu_names[armpmu->id], armpmu->num_events); + } else { + pr_info("no hardware support available\n"); + perf_max_events = -1; + } + + return 0; +} +arch_initcall(init_hw_perf_events); + +/* + * Callchain handling code. + */ +static inline void +callchain_store(struct perf_callchain_entry *entry, + u64 ip) +{ + if (entry->nr < PERF_MAX_STACK_DEPTH) + entry->ip[entry->nr++] = ip; +} + +/* + * The registers we're interested in are at the end of the variable + * length saved register structure. The fp points at the end of this + * structure so the address of this struct is: + * (struct frame_tail *)(xxx->fp)-1 + * + * This code has been adapted from the ARM OProfile support. + */ +struct frame_tail { + struct frame_tail *fp; + unsigned long sp; + unsigned long lr; +} __attribute__((packed)); + +/* + * Get the return address for a single stackframe and return a pointer to the + * next frame tail. + */ +static struct frame_tail * +user_backtrace(struct frame_tail *tail, + struct perf_callchain_entry *entry) +{ + struct frame_tail buftail; + + /* Also check accessibility of one struct frame_tail beyond */ + if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) + return NULL; + if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) + return NULL; + + callchain_store(entry, buftail.lr); + + /* + * Frame pointers should strictly progress back up the stack + * (towards higher addresses). 
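Aside (illustrative, not part of the patch): user_backtrace() stops as soon as a saved frame pointer fails to move towards higher addresses, which is what bounds the walk on truncated or corrupt user stacks. A self-contained sketch of the same stepping logic over a fake in-process stack; there is no access_ok()/__copy_from_user_inatomic() handling here, and next_frame() and the test data are purely illustrative.

        #include <stdio.h>

        struct frame_tail {
                struct frame_tail *fp;  /* saved frame pointer */
                unsigned long sp;
                unsigned long lr;       /* return address to record */
        };

        /* Record one return address and step to the next frame, or return NULL
         * if the saved fp does not strictly move towards higher addresses. */
        static struct frame_tail *next_frame(struct frame_tail *tail,
                                             unsigned long *ip)
        {
                *ip = tail->lr;
                if (tail >= tail->fp)
                        return NULL;
                return tail->fp - 1;    /* fp points one record past the caller's tail */
        }

        int main(void)
        {
                struct frame_tail frames[3] = { { 0 } };
                struct frame_tail *tail = &frames[0];
                unsigned long ip;

                frames[0].fp = &frames[1] + 1;  frames[0].lr = 0x1111;
                frames[1].fp = &frames[2] + 1;  frames[1].lr = 0x2222;
                frames[2].fp = &frames[0];      frames[2].lr = 0x3333;  /* terminates the walk */

                while (tail) {
                        tail = next_frame(tail, &ip);
                        printf("return address: 0x%lx\n", ip);
                }
                return 0;
        }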
+ */ + if (tail >= buftail.fp) + return NULL; + + return buftail.fp - 1; +} + +static void +perf_callchain_user(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ + struct frame_tail *tail; + + callchain_store(entry, PERF_CONTEXT_USER); + + if (!user_mode(regs)) + regs = task_pt_regs(current); + + tail = (struct frame_tail *)regs->ARM_fp - 1; + + while (tail && !((unsigned long)tail & 0x3)) + tail = user_backtrace(tail, entry); +} + +/* + * Gets called by walk_stackframe() for every stackframe. This will be called + * whist unwinding the stackframe and is like a subroutine return so we use + * the PC. + */ +static int +callchain_trace(struct stackframe *fr, + void *data) +{ + struct perf_callchain_entry *entry = data; + callchain_store(entry, fr->pc); + return 0; +} + +static void +perf_callchain_kernel(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ + struct stackframe fr; + + callchain_store(entry, PERF_CONTEXT_KERNEL); + fr.fp = regs->ARM_fp; + fr.sp = regs->ARM_sp; + fr.lr = regs->ARM_lr; + fr.pc = regs->ARM_pc; + walk_stackframe(&fr, callchain_trace, entry); +} + +static void +perf_do_callchain(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ + int is_user; + + if (!regs) + return; + + is_user = user_mode(regs); + + if (!current || !current->pid) + return; + + if (is_user && current->state != TASK_RUNNING) + return; + + if (!is_user) + perf_callchain_kernel(regs, entry); + + if (current->mm) + perf_callchain_user(regs, entry); +} + +static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); + +struct perf_callchain_entry * +perf_callchain(struct pt_regs *regs) +{ + struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); + + entry->nr = 0; + perf_do_callchain(regs, entry); + return entry; +} diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c new file mode 100644 index 000000000000..b8af96ea62e6 --- /dev/null +++ b/arch/arm/kernel/pmu.c @@ -0,0 +1,142 @@ +/* + * linux/arch/arm/kernel/pmu.c + * + * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles + * Copyright (C) 2010 ARM Ltd, Will Deacon + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ + +#define pr_fmt(fmt) "PMU: " fmt + +#include <linux/cpumask.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#include <asm/pmu.h> + +static volatile long pmu_lock; + +static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; + +static int __devinit pmu_device_probe(struct platform_device *pdev) +{ + + if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) { + pr_warning("received registration request for unknown " + "device %d\n", pdev->id); + return -EINVAL; + } + + if (pmu_devices[pdev->id]) + pr_warning("registering new PMU device type %d overwrites " + "previous registration!\n", pdev->id); + else + pr_info("registered new PMU device of type %d\n", + pdev->id); + + pmu_devices[pdev->id] = pdev; + return 0; +} + +static struct platform_driver pmu_driver = { + .driver = { + .name = "arm-pmu", + }, + .probe = pmu_device_probe, +}; + +static int __init register_pmu_driver(void) +{ + return platform_driver_register(&pmu_driver); +} +device_initcall(register_pmu_driver); + +struct platform_device * +reserve_pmu(enum arm_pmu_type device) +{ + struct platform_device *pdev; + + if (test_and_set_bit_lock(device, &pmu_lock)) { + pdev = ERR_PTR(-EBUSY); + } else if (pmu_devices[device] == NULL) { + clear_bit_unlock(device, &pmu_lock); + pdev = ERR_PTR(-ENODEV); + } else { + pdev = pmu_devices[device]; + } + + return pdev; +} +EXPORT_SYMBOL_GPL(reserve_pmu); + +int +release_pmu(struct platform_device *pdev) +{ + if (WARN_ON(pdev != pmu_devices[pdev->id])) + return -EINVAL; + clear_bit_unlock(pdev->id, &pmu_lock); + return 0; +} +EXPORT_SYMBOL_GPL(release_pmu); + +static int +set_irq_affinity(int irq, + unsigned int cpu) +{ +#ifdef CONFIG_SMP + int err = irq_set_affinity(irq, cpumask_of(cpu)); + if (err) + pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", + irq, cpu); + return err; +#else + return 0; +#endif +} + +static int +init_cpu_pmu(void) +{ + int i, err = 0; + struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; + + if (!pdev) { + err = -ENODEV; + goto out; + } + + for (i = 0; i < pdev->num_resources; ++i) { + err = set_irq_affinity(platform_get_irq(pdev, i), i); + if (err) + break; + } + +out: + return err; +} + +int +init_pmu(enum arm_pmu_type device) +{ + int err = 0; + + switch (device) { + case ARM_PMU_DEVICE_CPU: + err = init_cpu_pmu(); + break; + default: + pr_warning("attempt to initialise unknown device %d\n", + device); + err = -EINVAL; + } + + return err; +} +EXPORT_SYMBOL_GPL(init_pmu); diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index ba2adefa53f7..a4a9cc88bec7 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c @@ -16,7 +16,6 @@ #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> -#include <linux/slab.h> #include <linux/user.h> #include <linux/delay.h> #include <linux/reboot.h> @@ -352,23 +351,27 @@ EXPORT_SYMBOL(dump_fpu); /* * Shuffle the argument into the correct register before calling the - * thread function. r1 is the thread argument, r2 is the pointer to - * the thread function, and r3 points to the exit function. + * thread function. r4 is the thread argument, r5 is the pointer to + * the thread function, and r6 points to the exit function. 
*/ extern void kernel_thread_helper(void); -asm( ".section .text\n" +asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_helper, #function\n" "kernel_thread_helper:\n" -" mov r0, r1\n" -" mov lr, r3\n" -" mov pc, r2\n" +#ifdef CONFIG_TRACE_IRQFLAGS +" bl trace_hardirqs_on\n" +#endif +" msr cpsr_c, r7\n" +" mov r0, r4\n" +" mov lr, r6\n" +" mov pc, r5\n" " .size kernel_thread_helper, . - kernel_thread_helper\n" -" .previous"); +" .popsection"); #ifdef CONFIG_ARM_UNWIND extern void kernel_thread_exit(long code); -asm( ".section .text\n" +asm( ".pushsection .text\n" " .align\n" " .type kernel_thread_exit, #function\n" "kernel_thread_exit:\n" @@ -378,7 +381,7 @@ asm( ".section .text\n" " nop\n" " .fnend\n" " .size kernel_thread_exit, . - kernel_thread_exit\n" -" .previous"); +" .popsection"); #else #define kernel_thread_exit do_exit #endif @@ -392,11 +395,12 @@ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags) memset(®s, 0, sizeof(regs)); - regs.ARM_r1 = (unsigned long)arg; - regs.ARM_r2 = (unsigned long)fn; - regs.ARM_r3 = (unsigned long)kernel_thread_exit; + regs.ARM_r4 = (unsigned long)arg; + regs.ARM_r5 = (unsigned long)fn; + regs.ARM_r6 = (unsigned long)kernel_thread_exit; + regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE; regs.ARM_pc = (unsigned long)kernel_thread_helper; - regs.ARM_cpsr = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE; + regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT; return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); } diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index a2ea3854cb3c..3f562a7c0a99 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -452,12 +452,23 @@ void ptrace_cancel_bpt(struct task_struct *child) clear_breakpoint(child, &child->thread.debug.bp[i]); } +void user_disable_single_step(struct task_struct *task) +{ + task->ptrace &= ~PT_SINGLESTEP; + ptrace_cancel_bpt(task); +} + +void user_enable_single_step(struct task_struct *task) +{ + task->ptrace |= PT_SINGLESTEP; +} + /* * Called by kernel/ptrace.c when detaching.. */ void ptrace_disable(struct task_struct *child) { - single_step_disable(child); + user_disable_single_step(child); } /* @@ -499,10 +510,41 @@ static struct undef_hook thumb_break_hook = { .fn = break_trap, }; +static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr) +{ + unsigned int instr2; + void __user *pc; + + /* Check the second half of the instruction. 
*/ + pc = (void __user *)(instruction_pointer(regs) + 2); + + if (processor_mode(regs) == SVC_MODE) { + instr2 = *(u16 *) pc; + } else { + get_user(instr2, (u16 __user *)pc); + } + + if (instr2 == 0xa000) { + ptrace_break(current, regs); + return 0; + } else { + return 1; + } +} + +static struct undef_hook thumb2_break_hook = { + .instr_mask = 0xffff, + .instr_val = 0xf7f0, + .cpsr_mask = PSR_T_BIT, + .cpsr_val = PSR_T_BIT, + .fn = thumb2_break_trap, +}; + static int __init ptrace_break_init(void) { register_undef_hook(&arm_break_hook); register_undef_hook(&thumb_break_hook); + register_undef_hook(&thumb2_break_hook); return 0; } @@ -669,7 +711,7 @@ static int ptrace_getvfpregs(struct task_struct *tsk, void __user *data) union vfp_state *vfp = &thread->vfpstate; struct user_vfp __user *ufp = data; - vfp_sync_state(thread); + vfp_sync_hwstate(thread); /* copy the floating point registers */ if (copy_to_user(&ufp->fpregs, &vfp->hard.fpregs, @@ -692,7 +734,7 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) union vfp_state *vfp = &thread->vfpstate; struct user_vfp __user *ufp = data; - vfp_sync_state(thread); + vfp_sync_hwstate(thread); /* copy the floating point registers */ if (copy_from_user(&vfp->hard.fpregs, &ufp->fpregs, @@ -703,6 +745,8 @@ static int ptrace_setvfpregs(struct task_struct *tsk, void __user *data) if (get_user(vfp->hard.fpscr, &ufp->fpscr)) return -EFAULT; + vfp_flush_hwstate(thread); + return 0; } #endif @@ -712,77 +756,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) int ret; switch (request) { - /* - * read word at location "addr" in the child process. - */ - case PTRACE_PEEKTEXT: - case PTRACE_PEEKDATA: - ret = generic_ptrace_peekdata(child, addr, data); - break; - case PTRACE_PEEKUSR: ret = ptrace_read_user(child, addr, (unsigned long __user *)data); break; - /* - * write the word at location addr. - */ - case PTRACE_POKETEXT: - case PTRACE_POKEDATA: - ret = generic_ptrace_pokedata(child, addr, data); - break; - case PTRACE_POKEUSR: ret = ptrace_write_user(child, addr, data); break; - /* - * continue/restart and stop at next (return from) syscall - */ - case PTRACE_SYSCALL: - case PTRACE_CONT: - ret = -EIO; - if (!valid_signal(data)) - break; - if (request == PTRACE_SYSCALL) - set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - else - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->exit_code = data; - single_step_disable(child); - wake_up_process(child); - ret = 0; - break; - - /* - * make the child exit. Best I can do is send it a sigkill. - * perhaps it should be put in the status that it wants to - * exit. - */ - case PTRACE_KILL: - single_step_disable(child); - if (child->exit_state != EXIT_ZOMBIE) { - child->exit_code = SIGKILL; - wake_up_process(child); - } - ret = 0; - break; - - /* - * execute single instruction. - */ - case PTRACE_SINGLESTEP: - ret = -EIO; - if (!valid_signal(data)) - break; - single_step_enable(child); - clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); - child->exit_code = data; - /* give it a chance to run. 
*/ - wake_up_process(child); - ret = 0; - break; - case PTRACE_GETREGS: ret = ptrace_getregs(child, (void __user *)data); break; diff --git a/arch/arm/kernel/ptrace.h b/arch/arm/kernel/ptrace.h index def3b6184a79..3926605b82ea 100644 --- a/arch/arm/kernel/ptrace.h +++ b/arch/arm/kernel/ptrace.h @@ -14,20 +14,6 @@ extern void ptrace_set_bpt(struct task_struct *); extern void ptrace_break(struct task_struct *, struct pt_regs *); /* - * make sure single-step breakpoint is gone. - */ -static inline void single_step_disable(struct task_struct *task) -{ - task->ptrace &= ~PT_SINGLESTEP; - ptrace_cancel_bpt(task); -} - -static inline void single_step_enable(struct task_struct *task) -{ - task->ptrace |= PT_SINGLESTEP; -} - -/* * Send SIGTRAP if we're single-stepping */ static inline void single_step_trap(struct task_struct *task) diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index c6c57b640b6b..122d999bdc7c 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -24,6 +24,7 @@ #include <linux/interrupt.h> #include <linux/smp.h> #include <linux/fs.h> +#include <linux/proc_fs.h> #include <asm/unified.h> #include <asm/cpu.h> @@ -102,6 +103,7 @@ struct cpu_cache_fns cpu_cache; #endif #ifdef CONFIG_OUTER_CACHE struct outer_cache_fns outer_cache; +EXPORT_SYMBOL(outer_cache); #endif struct stack { @@ -117,7 +119,7 @@ EXPORT_SYMBOL(elf_platform); static const char *cpu_name; static const char *machine_name; -static char __initdata command_line[COMMAND_LINE_SIZE]; +static char __initdata cmd_line[COMMAND_LINE_SIZE]; static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE; static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } }; @@ -417,10 +419,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size) * Pick out the memory size. We look for mem=size@start, * where start and size are "size[KkMm]" */ -static void __init early_mem(char **p) +static int __init early_mem(char *p) { static int usermem __initdata = 0; unsigned long size, start; + char *endp; /* * If the user specifies memory size, we @@ -433,52 +436,15 @@ static void __init early_mem(char **p) } start = PHYS_OFFSET; - size = memparse(*p, p); - if (**p == '@') - start = memparse(*p + 1, p); + size = memparse(p, &endp); + if (*endp == '@') + start = memparse(endp + 1, NULL); arm_add_memory(start, size); -} -__early_param("mem=", early_mem); -/* - * Initial parsing of the command line. 
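Aside (illustrative, not part of the patch): the new early_mem() above still accepts the mem=size[@start] syntax described in the retained comment, it just receives the raw argument string from parse_early_param() and walks it with memparse(). A rough standalone sketch of that parsing; memparse_demo() is a minimal stand-in for the kernel helper, not its actual implementation.

        #include <stdio.h>
        #include <stdlib.h>

        /* Minimal stand-in for the kernel's memparse(): a number with an
         * optional K/M/G suffix; *retp is left at the first unconsumed char. */
        static unsigned long long memparse_demo(const char *p, char **retp)
        {
                unsigned long long v = strtoull(p, retp, 0);

                switch (**retp) {
                case 'G': case 'g': v <<= 10; /* fall through */
                case 'M': case 'm': v <<= 10; /* fall through */
                case 'K': case 'k': v <<= 10; (*retp)++;
                }
                return v;
        }

        int main(void)
        {
                const char *arg = "64M@0x80000000"; /* as passed via mem= */
                unsigned long long size, start = 0;
                char *endp;

                size = memparse_demo(arg, &endp);
                if (*endp == '@')
                        start = memparse_demo(endp + 1, &endp);

                printf("size = 0x%llx, start = 0x%llx\n", size, start);
                return 0;
        }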
- */ -static void __init parse_cmdline(char **cmdline_p, char *from) -{ - char c = ' ', *to = command_line; - int len = 0; - - for (;;) { - if (c == ' ') { - extern struct early_params __early_begin, __early_end; - struct early_params *p; - - for (p = &__early_begin; p < &__early_end; p++) { - int arglen = strlen(p->arg); - - if (memcmp(from, p->arg, arglen) == 0) { - if (to != command_line) - to -= 1; - from += arglen; - p->fn(&from); - - while (*from != ' ' && *from != '\0') - from++; - break; - } - } - } - c = *from++; - if (!c) - break; - if (COMMAND_LINE_SIZE <= ++len) - break; - *to++ = c; - } - *to = '\0'; - *cmdline_p = command_line; + return 0; } +early_param("mem", early_mem); static void __init setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) @@ -627,6 +593,7 @@ static int __init parse_tag_revision(const struct tag *tag) __tagtable(ATAG_REVISION, parse_tag_revision); +#ifndef CONFIG_CMDLINE_FORCE static int __init parse_tag_cmdline(const struct tag *tag) { strlcpy(default_command_line, tag->u.cmdline.cmdline, COMMAND_LINE_SIZE); @@ -634,6 +601,7 @@ static int __init parse_tag_cmdline(const struct tag *tag) } __tagtable(ATAG_CMDLINE, parse_tag_cmdline); +#endif /* CONFIG_CMDLINE_FORCE */ /* * Scan the tag table for this tag, and call its parse function. @@ -739,9 +707,15 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; - memcpy(boot_command_line, from, COMMAND_LINE_SIZE); - boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; - parse_cmdline(cmdline_p, from); + /* parse_early_param needs a boot_command_line */ + strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); + + /* populate cmd_line too for later use, preserving boot_command_line */ + strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = cmd_line; + + parse_early_param(); + paging_init(mdesc); request_standard_resources(&meminfo, mdesc); @@ -782,9 +756,21 @@ static int __init topology_init(void) return 0; } - subsys_initcall(topology_init); +#ifdef CONFIG_HAVE_PROC_CPU +static int __init proc_cpu_init(void) +{ + struct proc_dir_entry *res; + + res = proc_mkdir("cpu", NULL); + if (!res) + return -ENOMEM; + return 0; +} +fs_initcall(proc_cpu_init); +#endif + static const char *hwcap_str[] = { "swp", "half", diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index e7714f367eb8..907d5a620bca 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -18,6 +18,7 @@ #include <asm/cacheflush.h> #include <asm/ucontext.h> #include <asm/unistd.h> +#include <asm/vfp.h> #include "ptrace.h" #include "signal.h" @@ -175,6 +176,90 @@ static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame) #endif +#ifdef CONFIG_VFP + +static int preserve_vfp_context(struct vfp_sigframe __user *frame) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *h = &thread->vfpstate.hard; + const unsigned long magic = VFP_MAGIC; + const unsigned long size = VFP_STORAGE_SIZE; + int err = 0; + + vfp_sync_hwstate(thread); + __put_user_error(magic, &frame->magic, err); + __put_user_error(size, &frame->size, err); + + /* + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. + */ + err |= __copy_to_user(&frame->ufp.fpregs, &h->fpregs, + sizeof(h->fpregs)); + /* + * Copy the status and control register. + */ + __put_user_error(h->fpscr, &frame->ufp.fpscr, err); + + /* + * Copy the exception registers. 
+ */ + __put_user_error(h->fpexc, &frame->ufp_exc.fpexc, err); + __put_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); + __put_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); + + return err ? -EFAULT : 0; +} + +static int restore_vfp_context(struct vfp_sigframe __user *frame) +{ + struct thread_info *thread = current_thread_info(); + struct vfp_hard_struct *h = &thread->vfpstate.hard; + unsigned long magic; + unsigned long size; + unsigned long fpexc; + int err = 0; + + __get_user_error(magic, &frame->magic, err); + __get_user_error(size, &frame->size, err); + + if (err) + return -EFAULT; + if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE) + return -EINVAL; + + /* + * Copy the floating point registers. There can be unused + * registers see asm/hwcap.h for details. + */ + err |= __copy_from_user(&h->fpregs, &frame->ufp.fpregs, + sizeof(h->fpregs)); + /* + * Copy the status and control register. + */ + __get_user_error(h->fpscr, &frame->ufp.fpscr, err); + + /* + * Sanitise and restore the exception registers. + */ + __get_user_error(fpexc, &frame->ufp_exc.fpexc, err); + /* Ensure the VFP is enabled. */ + fpexc |= FPEXC_EN; + /* Ensure FPINST2 is invalid and the exception flag is cleared. */ + fpexc &= ~(FPEXC_EX | FPEXC_FP2V); + h->fpexc = fpexc; + + __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err); + __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err); + + if (!err) + vfp_flush_hwstate(thread); + + return err ? -EFAULT : 0; +} + +#endif + /* * Do a signal return; undo the signal stack. These are aligned to 64-bit. */ @@ -233,8 +318,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf) err |= restore_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP -// if (err == 0) -// err |= vfp_restore_state(&sf->aux.vfp); + if (err == 0) + err |= restore_vfp_context(&aux->vfp); #endif return err; @@ -348,8 +433,8 @@ setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set) err |= preserve_iwmmxt_context(&aux->iwmmxt); #endif #ifdef CONFIG_VFP -// if (err == 0) -// err |= vfp_save_state(&sf->aux.vfp); + if (err == 0) + err |= preserve_vfp_context(&aux->vfp); #endif __put_user_error(0, &aux->end_magic, err); diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 57162af53dc9..b8c3d0f689d9 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -86,6 +86,12 @@ int __cpuinit __cpu_up(unsigned int cpu) return PTR_ERR(idle); } ci->idle = idle; + } else { + /* + * Since this idle thread is being re-used, call + * init_idle() to reinitialize the thread structure. + */ + init_idle(idle, cpu); } /* @@ -99,6 +105,7 @@ int __cpuinit __cpu_up(unsigned int cpu) *pmd = __pmd((PHYS_OFFSET & PGDIR_MASK) | PMD_TYPE_SECT | PMD_SECT_AP_WRITE); flush_pmd_entry(pmd); + outer_clean_range(__pa(pmd), __pa(pmd + 1)); /* * We need to tell the secondary core where to find @@ -106,7 +113,8 @@ int __cpuinit __cpu_up(unsigned int cpu) */ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; secondary_data.pgdir = virt_to_phys(pgd); - wmb(); + __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); + outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); /* * Now bring the CPU into our world. 
@@ -160,7 +168,7 @@ int __cpu_disable(void) struct task_struct *p; int ret; - ret = mach_cpu_disable(cpu); + ret = platform_cpu_disable(cpu); if (ret) return ret; diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index ea02a7b1c244..7c5f0c024db7 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -21,23 +21,6 @@ #include <asm/smp_twd.h> #include <asm/hardware/gic.h> -#define TWD_TIMER_LOAD 0x00 -#define TWD_TIMER_COUNTER 0x04 -#define TWD_TIMER_CONTROL 0x08 -#define TWD_TIMER_INTSTAT 0x0C - -#define TWD_WDOG_LOAD 0x20 -#define TWD_WDOG_COUNTER 0x24 -#define TWD_WDOG_CONTROL 0x28 -#define TWD_WDOG_INTSTAT 0x2C -#define TWD_WDOG_RESETSTAT 0x30 -#define TWD_WDOG_DISABLE 0x34 - -#define TWD_TIMER_CONTROL_ENABLE (1 << 0) -#define TWD_TIMER_CONTROL_ONESHOT (0 << 1) -#define TWD_TIMER_CONTROL_PERIODIC (1 << 1) -#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2) - /* set up by the platform code */ void __iomem *twd_base; diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index ae4027bd01bd..c23501842b98 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -15,7 +15,6 @@ #include <linux/module.h> #include <linux/errno.h> #include <linux/sched.h> -#include <linux/slab.h> #include <linux/mm.h> #include <linux/sem.h> #include <linux/msg.h> @@ -27,135 +26,7 @@ #include <linux/file.h> #include <linux/ipc.h> #include <linux/uaccess.h> - -struct mmap_arg_struct { - unsigned long addr; - unsigned long len; - unsigned long prot; - unsigned long flags; - unsigned long fd; - unsigned long offset; -}; - -asmlinkage int old_mmap(struct mmap_arg_struct __user *arg) -{ - int error = -EFAULT; - struct mmap_arg_struct a; - - if (copy_from_user(&a, arg, sizeof(a))) - goto out; - - error = -EINVAL; - if (a.offset & ~PAGE_MASK) - goto out; - - error = sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT); -out: - return error; -} - -/* - * Perform the select(nd, in, out, ex, tv) and mmap() system - * calls. - */ - -struct sel_arg_struct { - unsigned long n; - fd_set __user *inp, *outp, *exp; - struct timeval __user *tvp; -}; - -asmlinkage int old_select(struct sel_arg_struct __user *arg) -{ - struct sel_arg_struct a; - - if (copy_from_user(&a, arg, sizeof(a))) - return -EFAULT; - /* sys_select() does the appropriate kernel locking */ - return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp); -} - -#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT) -/* - * sys_ipc() is the de-multiplexer for the SysV IPC calls.. - * - * This is really horribly ugly. 
- */ -asmlinkage int sys_ipc(uint call, int first, int second, int third, - void __user *ptr, long fifth) -{ - int version, ret; - - version = call >> 16; /* hack for backward compatibility */ - call &= 0xffff; - - switch (call) { - case SEMOP: - return sys_semtimedop (first, (struct sembuf __user *)ptr, second, NULL); - case SEMTIMEDOP: - return sys_semtimedop(first, (struct sembuf __user *)ptr, second, - (const struct timespec __user *)fifth); - - case SEMGET: - return sys_semget (first, second, third); - case SEMCTL: { - union semun fourth; - if (!ptr) - return -EINVAL; - if (get_user(fourth.__pad, (void __user * __user *) ptr)) - return -EFAULT; - return sys_semctl (first, second, third, fourth); - } - - case MSGSND: - return sys_msgsnd(first, (struct msgbuf __user *) ptr, - second, third); - case MSGRCV: - switch (version) { - case 0: { - struct ipc_kludge tmp; - if (!ptr) - return -EINVAL; - if (copy_from_user(&tmp,(struct ipc_kludge __user *)ptr, - sizeof (tmp))) - return -EFAULT; - return sys_msgrcv (first, tmp.msgp, second, - tmp.msgtyp, third); - } - default: - return sys_msgrcv (first, - (struct msgbuf __user *) ptr, - second, fifth, third); - } - case MSGGET: - return sys_msgget ((key_t) first, second); - case MSGCTL: - return sys_msgctl(first, second, (struct msqid_ds __user *)ptr); - - case SHMAT: - switch (version) { - default: { - ulong raddr; - ret = do_shmat(first, (char __user *)ptr, second, &raddr); - if (ret) - return ret; - return put_user(raddr, (ulong __user *)third); - } - case 1: /* Of course, we don't support iBCS2! */ - return -EINVAL; - } - case SHMDT: - return sys_shmdt ((char __user *)ptr); - case SHMGET: - return sys_shmget (first, second, third); - case SHMCTL: - return sys_shmctl (first, second, - (struct shmid_ds __user *) ptr); - default: - return -ENOSYS; - } -} -#endif +#include <linux/slab.h> /* Fork a new task - this creates a new program thread. * This is called indirectly via a small wrapper diff --git a/arch/arm/kernel/sys_oabi-compat.c b/arch/arm/kernel/sys_oabi-compat.c index d59a0cd537f0..33ff678e32f2 100644 --- a/arch/arm/kernel/sys_oabi-compat.c +++ b/arch/arm/kernel/sys_oabi-compat.c @@ -346,9 +346,6 @@ asmlinkage long sys_oabi_semop(int semid, struct oabi_sembuf __user *tsops, return sys_oabi_semtimedop(semid, tsops, nsops, NULL); } -extern asmlinkage int sys_ipc(uint call, int first, int second, int third, - void __user *ptr, long fifth); - asmlinkage int sys_oabi_ipc(uint call, int first, int second, int third, void __user *ptr, long fifth) { diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c index d38cdf2c8276..38c261f9951c 100644 --- a/arch/arm/kernel/time.c +++ b/arch/arm/kernel/time.c @@ -10,11 +10,6 @@ * * This file contains the ARM-specific time handling details: * reading the RTC at bootup, etc... - * - * 1994-07-02 Alan Modra - * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime - * 1998-12-20 Updated NTP code according to technical memorandum Jan '96 - * "A Kernel Model for Precision Timekeeping" by Dave Mills */ #include <linux/module.h> #include <linux/kernel.h> @@ -77,151 +72,15 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); #endif -/* - * hook for setting the RTC's idea of the current time. 
- */ -int (*set_rtc)(void); - -#ifndef CONFIG_GENERIC_TIME -static unsigned long dummy_gettimeoffset(void) -{ - return 0; -} -#endif - -static unsigned long next_rtc_update; - -/* - * If we have an externally synchronized linux clock, then update - * CMOS clock accordingly every ~11 minutes. set_rtc() has to be - * called as close as possible to 500 ms before the new second - * starts. - */ -static inline void do_set_rtc(void) -{ - if (!ntp_synced() || set_rtc == NULL) - return; - - if (next_rtc_update && - time_before((unsigned long)xtime.tv_sec, next_rtc_update)) - return; - - if (xtime.tv_nsec < 500000000 - ((unsigned) tick_nsec >> 1) && - xtime.tv_nsec >= 500000000 + ((unsigned) tick_nsec >> 1)) - return; - - if (set_rtc()) - /* - * rtc update failed. Try again in 60s - */ - next_rtc_update = xtime.tv_sec + 60; - else - next_rtc_update = xtime.tv_sec + 660; -} - -#ifdef CONFIG_LEDS - -static void dummy_leds_event(led_event_t evt) -{ -} - -void (*leds_event)(led_event_t) = dummy_leds_event; - -struct leds_evt_name { - const char name[8]; - int on; - int off; -}; - -static const struct leds_evt_name evt_names[] = { - { "amber", led_amber_on, led_amber_off }, - { "blue", led_blue_on, led_blue_off }, - { "green", led_green_on, led_green_off }, - { "red", led_red_on, led_red_off }, -}; - -static ssize_t leds_store(struct sys_device *dev, - struct sysdev_attribute *attr, - const char *buf, size_t size) +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET +u32 arch_gettimeoffset(void) { - int ret = -EINVAL, len = strcspn(buf, " "); - - if (len > 0 && buf[len] == '\0') - len--; - - if (strncmp(buf, "claim", len) == 0) { - leds_event(led_claim); - ret = size; - } else if (strncmp(buf, "release", len) == 0) { - leds_event(led_release); - ret = size; - } else { - int i; - - for (i = 0; i < ARRAY_SIZE(evt_names); i++) { - if (strlen(evt_names[i].name) != len || - strncmp(buf, evt_names[i].name, len) != 0) - continue; - if (strncmp(buf+len, " on", 3) == 0) { - leds_event(evt_names[i].on); - ret = size; - } else if (strncmp(buf+len, " off", 4) == 0) { - leds_event(evt_names[i].off); - ret = size; - } - break; - } - } - return ret; -} - -static SYSDEV_ATTR(event, 0200, NULL, leds_store); - -static int leds_suspend(struct sys_device *dev, pm_message_t state) -{ - leds_event(led_stop); - return 0; -} - -static int leds_resume(struct sys_device *dev) -{ - leds_event(led_start); - return 0; -} + if (system_timer->offset != NULL) + return system_timer->offset() * 1000; -static int leds_shutdown(struct sys_device *dev) -{ - leds_event(led_halted); return 0; } - -static struct sysdev_class leds_sysclass = { - .name = "leds", - .shutdown = leds_shutdown, - .suspend = leds_suspend, - .resume = leds_resume, -}; - -static struct sys_device leds_device = { - .id = 0, - .cls = &leds_sysclass, -}; - -static int __init leds_init(void) -{ - int ret; - ret = sysdev_class_register(&leds_sysclass); - if (ret == 0) - ret = sysdev_register(&leds_device); - if (ret == 0) - ret = sysdev_create_file(&leds_device, &attr_event); - return ret; -} - -device_initcall(leds_init); - -EXPORT_SYMBOL(leds_event); -#endif +#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ #ifdef CONFIG_LEDS_TIMER static inline void do_leds(void) @@ -237,96 +96,6 @@ static inline void do_leds(void) #define do_leds() #endif -#ifndef CONFIG_GENERIC_TIME -void do_gettimeofday(struct timeval *tv) -{ - unsigned long flags; - unsigned long seq; - unsigned long usec, sec; - - do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - usec = system_timer->offset(); - sec = 
xtime.tv_sec; - usec += xtime.tv_nsec / 1000; - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - - /* usec may have gone up a lot: be safe */ - while (usec >= 1000000) { - usec -= 1000000; - sec++; - } - - tv->tv_sec = sec; - tv->tv_usec = usec; -} - -EXPORT_SYMBOL(do_gettimeofday); - -int do_settimeofday(struct timespec *tv) -{ - time_t wtm_sec, sec = tv->tv_sec; - long wtm_nsec, nsec = tv->tv_nsec; - - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - write_seqlock_irq(&xtime_lock); - /* - * This is revolting. We need to set "xtime" correctly. However, the - * value in this location is the value at the most recent update of - * wall time. Discover what correction gettimeofday() would have - * done, and then undo it! - */ - nsec -= system_timer->offset() * NSEC_PER_USEC; - - wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); - wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); - - set_normalized_timespec(&xtime, sec, nsec); - set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); - - ntp_clear(); - write_sequnlock_irq(&xtime_lock); - clock_was_set(); - return 0; -} - -EXPORT_SYMBOL(do_settimeofday); -#endif /* !CONFIG_GENERIC_TIME */ - -/** - * save_time_delta - Save the offset between system time and RTC time - * @delta: pointer to timespec to store delta - * @rtc: pointer to timespec for current RTC time - * - * Return a delta between the system time and the RTC time, such - * that system time can be restored later with restore_time_delta() - */ -void save_time_delta(struct timespec *delta, struct timespec *rtc) -{ - set_normalized_timespec(delta, - xtime.tv_sec - rtc->tv_sec, - xtime.tv_nsec - rtc->tv_nsec); -} -EXPORT_SYMBOL(save_time_delta); - -/** - * restore_time_delta - Restore the current system time - * @delta: delta returned by save_time_delta() - * @rtc: pointer to timespec for current RTC time - */ -void restore_time_delta(struct timespec *delta, struct timespec *rtc) -{ - struct timespec ts; - - set_normalized_timespec(&ts, - delta->tv_sec + rtc->tv_sec, - delta->tv_nsec + rtc->tv_nsec); - - do_settimeofday(&ts); -} -EXPORT_SYMBOL(restore_time_delta); #ifndef CONFIG_GENERIC_CLOCKEVENTS /* @@ -336,7 +105,6 @@ void timer_tick(void) { profile_tick(CPU_PROFILING); do_leds(); - do_set_rtc(); write_seqlock(&xtime_lock); do_timer(1); write_sequnlock(&xtime_lock); @@ -392,10 +160,6 @@ device_initcall(timer_init_sysfs); void __init time_init(void) { -#ifndef CONFIG_GENERIC_TIME - if (system_timer->offset == NULL) - system_timer->offset = dummy_gettimeoffset; -#endif system_timer->init(); } diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 3f361a783f43..1621e5327b2a 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -12,15 +12,17 @@ * 'linux/arch/arm/lib/traps.S'. Mostly a debugging aid, but will probably * kill the offending process. 
*/ -#include <linux/module.h> #include <linux/signal.h> -#include <linux/spinlock.h> #include <linux/personality.h> #include <linux/kallsyms.h> -#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/uaccess.h> #include <linux/hardirq.h> +#include <linux/kdebug.h> +#include <linux/module.h> +#include <linux/kexec.h> +#include <linux/delay.h> #include <linux/init.h> -#include <linux/uaccess.h> #include <asm/atomic.h> #include <asm/cacheflush.h> @@ -224,14 +226,21 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #define S_SMP "" #endif -static void __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) +static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs) { struct task_struct *tsk = thread->task; static int die_counter; + int ret; printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n", str, err, ++die_counter); sysfs_printk_last_file(); + + /* trap and error numbers are mostly meaningless on ARM */ + ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); + if (ret == NOTIFY_STOP) + return ret; + print_modules(); __show_regs(regs); printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", @@ -243,6 +252,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p dump_backtrace(regs, tsk); dump_instr(KERN_EMERG, regs); } + + return ret; } DEFINE_SPINLOCK(die_lock); @@ -250,16 +261,21 @@ DEFINE_SPINLOCK(die_lock); /* * This function is protected against re-entrancy. */ -NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) +void die(const char *str, struct pt_regs *regs, int err) { struct thread_info *thread = current_thread_info(); + int ret; oops_enter(); spin_lock_irq(&die_lock); console_verbose(); bust_spinlocks(1); - __die(str, err, thread, regs); + ret = __die(str, err, thread, regs); + + if (regs && kexec_should_crash(thread->task)) + crash_kexec(regs); + bust_spinlocks(0); add_taint(TAINT_DIE); spin_unlock_irq(&die_lock); @@ -267,11 +283,10 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err) if (in_interrupt()) panic("Fatal exception in interrupt"); - if (panic_on_oops) panic("Fatal exception"); - - do_exit(SIGSEGV); + if (ret != NOTIFY_STOP) + do_exit(SIGSEGV); } void arm_notify_die(const char *str, struct pt_regs *regs, diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 786ac2b6914a..dd81a918c106 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -26,6 +26,7 @@ * http://infocenter.arm.com/help/topic/com.arm.doc.subset.swdev.abi/index.html */ +#ifndef __CHECKER__ #if !defined (__ARM_EABI__) #warning Your compiler does not have EABI support. #warning ARM unwind is known to compile only with EABI compilers. @@ -34,6 +35,7 @@ #warning Your compiler is too buggy; it is known to not compile ARM unwind support. #warning Change compiler or disable ARM_UNWIND option. #endif +#endif /* __CHECKER__ */ #include <linux/kernel.h> #include <linux/init.h> @@ -359,7 +361,9 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) frame.fp = regs->ARM_fp; frame.sp = regs->ARM_sp; frame.lr = regs->ARM_lr; - frame.pc = regs->ARM_pc; + /* PC might be corrupted, use LR in that case. */ + frame.pc = kernel_text_address(regs->ARM_pc) + ? 
regs->ARM_pc : regs->ARM_lr; } else if (tsk == current) { frame.fp = (unsigned long)__builtin_frame_address(0); frame.sp = current_sp; diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 4957e13ef55b..b16c07914b55 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -43,10 +43,6 @@ SECTIONS INIT_SETUP(16) - __early_begin = .; - *(.early_param.init) - __early_end = .; - INIT_CALLS CON_INITCALL SECURITY_INITCALL |
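The vmlinux.lds.S hunk above drops the ARM-private .early_param.init section and its __early_begin/__early_end markers because, as the setup.c changes earlier in this diff show (for example early_param("mem", early_mem)), early boot options are now registered through the generic early_param() mechanism and processed by parse_early_param() from setup_arch(). For illustration, a handler under that scheme looks roughly like the sketch below; the "example_mem" option, its handler, and the variable it fills are hypothetical and not part of this patch.

#include <linux/init.h>
#include <linux/kernel.h>

static unsigned long long example_limit __initdata;

/*
 * parse_early_param(), called from setup_arch(), scans boot_command_line
 * and invokes this handler for "example_mem=<size>" before the regular
 * __setup() handlers run.
 */
static int __init early_example_mem(char *p)
{
	example_limit = memparse(p, &p);	/* accepts K/M/G suffixes */
	return 0;
}
early_param("example_mem", early_example_mem);

Because the generic table of early parameters lives in the common init sections, the architecture no longer needs its own linker-script section or its own command-line scanner, which is what the removed parse_cmdline() and the removed .early_param.init lines amount to.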